// Since maps can be stored in variables and are generated on the fly,
// we sometimes need the main block to associate them with.
func (m *Map) mainBlock() block {
    var b block = m
    for b.parentBlock() != nil {
        b = b.parentBlock()
    }
    return b
}
import { Divider } from '@/components/Divider';
import { FileServerHost } from './components/FileServerHost';
import { PS4Host } from './components/PS4Host';

export const Hosts = () => {
  return (
    <>
      <FileServerHost />
      <Divider />
      <PS4Host />
    </>
  );
};
def avote_random(avote):
    """Add random noise drawn from DEFAULT_DISTRIB to a vote value."""
    if avote is None:
        return None
    raw_val = DEFAULT_DISTRIB.rvs(size=1)[0]
    return avote + raw_val
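A minimal usage sketch for the function above. It assumes DEFAULT_DISTRIB is a frozen scipy.stats distribution; the zero-mean normal below is a hypothetical stand-in for whatever the real module configures:

from scipy.stats import norm

DEFAULT_DISTRIB = norm(loc=0.0, scale=1.0)  # hypothetical stand-in

print(avote_random(3.5))   # 3.5 plus one random draw from the distribution
print(avote_random(None))  # None passes through unchanged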
/// Call a LuaRef as a function (or table with __call metamethod).
///
/// @param ref     the reference to call (not consumed)
/// @param name    if non-NULL, sent to callback as first arg;
///                if NULL, only args are used
/// @param retval  if true, convert return value to Object;
///                if false, discard return value
/// @param err     Error details, if any (if NULL, errors are echoed)
/// @return Return value of function, if retval was set. Otherwise NIL.
Object nlua_call_ref(LuaRef ref, const char *name, Array args, bool retval,
                     Error *err)
{
  lua_State *const lstate = global_lstate;
  nlua_pushref(lstate, ref);
  int nargs = (int)args.size;
  if (name != NULL) {
    lua_pushstring(lstate, name);
    nargs++;
  }
  for (size_t i = 0; i < args.size; i++) {
    nlua_push_Object(lstate, args.items[i], false);
  }

  if (lua_pcall(lstate, nargs, retval ? 1 : 0, 0)) {
    // Error: the message is on top of the Lua stack.
    if (err) {
      size_t len;
      const char *errstr = lua_tolstring(lstate, -1, &len);
      api_set_error(err, kErrorTypeException, "Error executing lua: %.*s",
                    (int)len, errstr);
    } else {
      nlua_error(lstate, _("Error executing lua callback: %.*s"));
    }
    return NIL;
  }

  if (retval) {
    Error dummy = ERROR_INIT;
    if (err == NULL) {
      err = &dummy;
    }
    return nlua_pop_Object(lstate, false, err);
  } else {
    return NIL;
  }
}
// IsSliceContainsString reports whether search is present in strSlice,
// comparing case-insensitively.
func IsSliceContainsString(strSlice []string, search string) bool {
    for _, str := range strSlice {
        if strings.EqualFold(str, search) {
            return true
        }
    }
    return false
}
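A self-contained usage sketch for the helper above; the function is repeated so the example compiles on its own:

package main

import (
    "fmt"
    "strings"
)

// IsSliceContainsString is repeated here so the sketch runs standalone.
func IsSliceContainsString(strSlice []string, search string) bool {
    for _, str := range strSlice {
        if strings.EqualFold(str, search) {
            return true
        }
    }
    return false
}

func main() {
    langs := []string{"Go", "Rust", "Python"}
    fmt.Println(IsSliceContainsString(langs, "go"))   // true: EqualFold ignores case
    fmt.Println(IsSliceContainsString(langs, "Java")) // false
}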
//**************************************
// SquareDistanceVertexToFace
// Returns the squared distance from a
// vertex to a face; the projected
// vertex is stored in pVertexProjected.
//**************************************
double SquareDistanceVertexToFace(CVertex3d *pVertex, CFace3d *pFace,
                                  CVertex3d *pVertexProjected)
{
    // Project onto the face's supporting plane; if the projection lies
    // inside the face, that is the nearest point.
    ProjectPointOnFace(pVertex, pFace, pVertexProjected);
    if (VertexInFace(pVertexProjected, pFace))
        return DistanceSquare(pVertexProjected, pVertex);

    // Otherwise project the plane point onto each of the three edge lines.
    CVertex3d VertexProjectedOnLine[3];
    int i;
    for (i = 0; i < 3; i++)
        ProjectVertexOnLine(pVertexProjected, pFace->v(i),
                            pFace->v((i + 1) % 3), &VertexProjectedOnLine[i]);

    // Flag the projections that actually fall on their edge segment.
    int success = 0;
    for (i = 0; i < 3; i++)
        if (VertexOnSegment(&VertexProjectedOnLine[i], pFace->v(i),
                            pFace->v((i + 1) % 3)))
        {
            success = 1;
            VertexProjectedOnLine[i].SetFlag(1);
        }

    if (success)
    {
        // The nearest point is either a face vertex or one of the flagged
        // edge projections; keep whichever is closest.
        CVertex3d *pVertexTmp = pFace->FindNearestVertex(pVertexProjected);
        CVertex3d VertexProjectedFinal(pVertexTmp);
        double MinDistance = DistanceSquare(pVertexProjected, pVertexTmp);
        for (i = 0; i < 3; i++)
            if (VertexProjectedOnLine[i].GetFlag())
            {
                VertexProjectedOnLine[i].SetFlag(0);
                double tmp = DistanceSquare(pVertexProjected,
                                            &VertexProjectedOnLine[i]);
                if (tmp <= MinDistance)
                {
                    VertexProjectedFinal.Set(VertexProjectedOnLine[i]);
                    MinDistance = tmp;
                }
            }
        pVertexProjected->Set(VertexProjectedFinal);
        return DistanceSquare(&VertexProjectedFinal, pVertex);
    }

    // No edge projection lies on a segment: fall back to the nearest vertex.
    CVertex3d *pVertexProjectedFinal = pFace->FindNearestVertex(pVertexProjected);
    pVertexProjected->Set(pVertexProjectedFinal);
    return DistanceSquare(pVertexProjectedFinal, pVertex);
}
def _prepare_factor(self, hdr, year, day, month):
    """Return parsed GLONASS channel data, downloading it when the RINEX
    header lacks the 'GLONASS SLOT / FRQ #' record."""
    if "GLONASS SLOT / FRQ #" not in hdr:
        glonass_channel = dw.DownloadGlonassChannel(year, day, month)
        glonass_channel.download()
        glonass_channels_parser = pr.ParserChannels(glonass_channel.file_uncompressed)
    else:
        glonass_channels_parser = pr.ParserRinexChannels(hdr['GLONASS SLOT / FRQ #'])
    glonass_channels_parser.parser()
    return glonass_channels_parser.parsed
def prepare(self):
    super(BlazeMeterUploader, self).prepare()
    self.send_interval = dehumanize_time(self.settings.get("send-interval", self.send_interval))
    self.send_monitoring = self.settings.get("send-monitoring", self.send_monitoring)
    monitoring_buffer_limit = self.settings.get("monitoring-buffer-limit", 500)
    self.monitoring_buffer = MonitoringBuffer(monitoring_buffer_limit, self.log)
    self.browser_open = self.settings.get("browser-open", self.browser_open)
    self.public_report = self.settings.get("public-report", self.public_report)
    self._dpoint_serializer.multi = self.settings.get("report-times-multiplier", self._dpoint_serializer.multi)

    token = self.settings.get("token", "")
    if not token:
        self.log.warning("No BlazeMeter API key provided, will upload anonymously")
    self._user.token = token

    self._user.logger_limit = self.settings.get("request-logging-limit", self._user.logger_limit)
    self._user.address = self.settings.get("address", self._user.address).rstrip("/")
    self._user.data_address = self.settings.get("data-address", self._user.data_address).rstrip("/")
    self._user.timeout = dehumanize_time(self.settings.get("timeout", self._user.timeout))

    sess_id = self.parameters.get("session-id")
    if sess_id:
        self._session = Session(self._user, {'id': sess_id})
        self._session['userId'] = self.parameters.get("user-id", None)
        self._session['testId'] = self.parameters.get("test-id", None)
        self._test = Test(self._user, {'id': self._session['testId']})
        exc = TaurusConfigError("Need signature for session")
        self._session.data_signature = self.parameters.get("signature", exc)
        self._session.kpi_target = self.parameters.get("kpi-target", self._session.kpi_target)
        self.send_data = self.parameters.get("send-data", self.send_data)
        self.upload_artifacts = self.parameters.get("upload-artifacts", self.upload_artifacts)
    else:
        try:
            self._user.ping()  # check connectivity and auth
        except HTTPError:
            self.log.error("Cannot reach online results storage, maybe the address/token is wrong")
            raise

        if token:
            wsp = self._user.accounts().workspaces()
            if not wsp:
                raise TaurusNetworkError("Your account has no active workspaces, please contact BlazeMeter support")
            finder = ProjectFinder(self.parameters, self.settings, self._user, wsp, self.log)
            self._test = finder.resolve_external_test()
        else:
            self._test = Test(self._user, {'id': None})

    self.report_name = self.parameters.get("report-name", self.settings.get("report-name", self.report_name))
    if self.report_name == 'ask' and sys.stdin.isatty():
        self.report_name = r_input("Please enter report-name: ")

    if isinstance(self.engine.aggregator, ResultsProvider):
        self.engine.aggregator.add_listener(self)

    for service in self.engine.services:
        if isinstance(service, Monitoring):
            service.add_listener(self)
// This file is part of simple-http-server. It is subject to the license terms in the COPYRIGHT file found in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/simple-http-server/master/COPYRIGHT. No part of simple-http-server, including this file, may be copied, modified, propagated, or distributed except according to the terms contained in the COPYRIGHT file.
// Copyright © 2018 The developers of simple-http-server. See the COPYRIGHT file in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/simple-http-server/master/COPYRIGHT.

#[link(name = "c")]
extern "C"
{
    /// `getsockname()` returns the current address to which the socket `sockfd` is bound, in the buffer pointed to by `addr`.
    /// The `addrlen` argument should be initialized to indicate the amount of space (in bytes) pointed to by `addr`.
    /// On return it contains the actual size of the socket address.
    ///
    /// The returned address is truncated if the buffer provided is too small; in this case, `addrlen` will return a value greater than was supplied to the call.
    ///
    /// On success, zero is returned.
    /// On error, `-1` is returned, and `errno` is set appropriately.
    ///
    /// The known errors that can be set in `errno` are:-
    ///
    /// * `EBADF`: The argument `sockfd` is not a valid file descriptor.
    /// * `EFAULT`: The `addr` argument points to memory not in a valid part of the process address space.
    /// * `EINVAL`: `addrlen` is invalid.
    /// * `ENOBUFS`: Insufficient resources were available in the system to perform the operation.
    /// * `ENOTSOCK`: The file descriptor `sockfd` does not refer to a socket.
    pub(crate) fn getsockname(sockfd: RawFd, addr: *mut sockaddr_storage, addrlen: *mut socklen_t) -> c_int;
}
// components/Footer.tsx
import { FC } from "react";

import Heading from "./Heading";

const Footer: FC = () => (
  <footer>
    <Heading tag="h3" text="Created by webDev" />
  </footer>
);

export default Footer;
def _build_feature_references(feature_ref_strs: List[str]) -> List[FeatureRefProto]:
    """Parse feature reference strings and convert them to their proto form."""
    feature_refs = [FeatureRef.from_str(ref_str) for ref_str in feature_ref_strs]
    return [ref.to_proto() for ref in feature_refs]
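A hypothetical usage sketch: the reference strings below follow a "<table>:<feature>" convention, but both the convention details and the names are assumptions, not taken from this file:

ref_strs = ["driver_stats:avg_rating", "driver_stats:trips_today"]  # made-up refs
protos = _build_feature_references(ref_strs)
for proto in protos:
    print(type(proto).__name__)  # each entry is a FeatureRefProto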
// src/types/interfaces.d.ts
interface iOption {
  alternative: string;
  is_correct: boolean;
}

interface iQuestion {
  title: string;
  difficulty: number;
  options: iOption[];
}
#include "xvmaininc.h" #include "defines.h" #include "declares.h" #include "externs.h" /* Finish up a file by writing the EOF mark, etc. */ int v2_finish_file(unit) int unit; { struct bufstate *bufstate; struct devstate *devstate; int status; int tindex; V2_OFFSET j; bufstate = (struct bufstate *)CURRENT_IP_VALUE(BUFSTATE); devstate = &bufstate->devstate; status = io_complete_check(devstate); /* wait for any pending reads/writes */ if (status != SUCCESS) return status; switch (devstate->device_type) { /* close the file */ #if RTL_USE_TAPE case DEV_TAPE: tindex = devstate->dev.tape.tindex; i_open[tindex] = FALSE; /* Indicate not opened to TAE */ if (EQUAL(CURRENT_S_VALUE(OP), "WRITE")) { status = double_eof(&devstate->dev.tape); /* Write the eof marks */ if (status != SUCCESS) return status; } break; #endif case DEV_ANSI: break; case DEV_MEMORY: /* No close processing necessary */ break; case DEV_ARRAY: /* Falls through to the DISK close routine */ case DEV_DISK: /* If file is an output file, update the eof in the file header */ if (!EQUAL(CURRENT_S_VALUE(OP),"READ")) { /* Calculate the total number of bytes in the file depending on the type of */ /* file we have. Unlabeled output files check the actual number of records */ /* written, while input files and labeled files use file size info. */ if ((CURRENT_I_VALUE(FLAGS) & NO_LABELS) && (EQUAL(CURRENT_S_VALUE(OP),"WRITE"))) { j = bufstate->eof_record * bufstate->recsize; } else { j = CURRENT_I_VALUE(N1) * CURRENT_I_VALUE(PIX_SIZE) + CURRENT_I_VALUE(NBB); j = j * ((CURRENT_I_VALUE(N2) * CURRENT_I_VALUE(N3)) + CURRENT_I_VALUE(NLB)) + CURRENT_I_VALUE(LBLSIZE) + CURRENT_I_VALUE(EOL_SIZE); } status = v2_write_disk_eof(unit, &devstate->dev.disk, j, bufstate->recsize); if (status != SUCCESS) return status; } break; case DEV_DECNET: break; } return SUCCESS; }
/*
The MIT License

Copyright (c) 2021, Prominence AI, Inc.

Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
*/

#include "catch.hpp"
#include "Dsl.h"
#include "DslApi.h"

#define TIME_TO_SLEEP_FOR std::chrono::milliseconds(1000)

SCENARIO( "A single Player is created and deleted correctly", "[player-api]" )
{
    GIVEN( "An empty list of Players" )
    {
        std::wstring player_name = L"player";
        std::wstring source_name = L"file-source";
        std::wstring file_path = L"./test/streams/sample_1080p_h264.mp4";
        std::wstring sinkName = L"window-sink";
        uint offsetX(0);
        uint offsetY(0);
        uint sinkW(1280);
        uint sinkH(720);

        REQUIRE( dsl_source_file_new(source_name.c_str(), file_path.c_str(),
            false) == DSL_RESULT_SUCCESS );
        REQUIRE( dsl_sink_window_new(sinkName.c_str(),
            offsetX, offsetY, sinkW, sinkH) == DSL_RESULT_SUCCESS );
        REQUIRE( dsl_player_list_size() == 0 );

        WHEN( "A new Player is created" )
        {
            REQUIRE( dsl_player_new(player_name.c_str(), source_name.c_str(),
                sinkName.c_str()) == DSL_RESULT_SUCCESS );

            THEN( "The list size and contents are updated correctly" )
            {
                REQUIRE( dsl_player_list_size() == 1 );
                REQUIRE( dsl_player_delete(player_name.c_str()) == DSL_RESULT_SUCCESS );
                REQUIRE( dsl_player_list_size() == 0 );
                REQUIRE( dsl_component_delete_all() == DSL_RESULT_SUCCESS );
            }
        }
    }
}

SCENARIO( "A single Player can Play, Pause, and Stop", "[player-api]" )
{
    GIVEN( "An empty list of Players" )
    {
        std::wstring player_name = L"player";
        std::wstring source_name = L"file-source";
        std::wstring file_path = L"./test/streams/sample_1080p_h264.mp4";
        std::wstring sinkName = L"window-sink";
        uint offsetX(0);
        uint offsetY(0);
        uint sinkW(1280);
        uint sinkH(720);

        REQUIRE( dsl_source_file_new(source_name.c_str(), file_path.c_str(),
            false) == DSL_RESULT_SUCCESS );
        REQUIRE( dsl_sink_window_new(sinkName.c_str(),
            offsetX, offsetY, sinkW, sinkH) == DSL_RESULT_SUCCESS );
        REQUIRE( dsl_player_list_size() == 0 );

        WHEN( "A new Player is created" )
        {
            REQUIRE( dsl_player_new(player_name.c_str(), source_name.c_str(),
                sinkName.c_str()) == DSL_RESULT_SUCCESS );

            THEN( "The list size and contents are updated correctly" )
            {
                REQUIRE( dsl_player_play(player_name.c_str()) == DSL_RESULT_SUCCESS );
                std::this_thread::sleep_for(TIME_TO_SLEEP_FOR);
                REQUIRE( dsl_player_pause(player_name.c_str()) == DSL_RESULT_SUCCESS );
                std::this_thread::sleep_for(TIME_TO_SLEEP_FOR);
                REQUIRE( dsl_player_play(player_name.c_str()) == DSL_RESULT_SUCCESS );
                std::this_thread::sleep_for(TIME_TO_SLEEP_FOR);
                REQUIRE( dsl_player_stop(player_name.c_str()) == DSL_RESULT_SUCCESS );

                REQUIRE( dsl_player_delete(player_name.c_str()) == DSL_RESULT_SUCCESS );
                REQUIRE( dsl_component_delete_all() == DSL_RESULT_SUCCESS );
            }
        }
    }
}

SCENARIO( "A File Render Player can Play, Pause, and Stop", "[player-api]" )
{
    GIVEN( "An empty list of Players" )
    {
        std::wstring player_name = L"player";
        std::wstring file_path = L"./test/streams/sample_1080p_h264.mp4";
        uint offsetX(0);
        uint offsetY(0);

        REQUIRE( dsl_player_list_size() == 0 );

        WHEN( "A new Player is created" )
        {
            REQUIRE( dsl_player_render_video_new(player_name.c_str(), file_path.c_str(),
                DSL_RENDER_TYPE_WINDOW, 10, 10, 75, false) == DSL_RESULT_SUCCESS );

            THEN( "The list size and contents are updated correctly" )
            {
                REQUIRE( dsl_player_play(player_name.c_str()) == DSL_RESULT_SUCCESS );
                std::this_thread::sleep_for(TIME_TO_SLEEP_FOR);
                REQUIRE( dsl_player_pause(player_name.c_str()) == DSL_RESULT_SUCCESS );
                std::this_thread::sleep_for(TIME_TO_SLEEP_FOR);
                REQUIRE( dsl_player_play(player_name.c_str()) == DSL_RESULT_SUCCESS );
                std::this_thread::sleep_for(TIME_TO_SLEEP_FOR);
                REQUIRE( dsl_player_stop(player_name.c_str()) == DSL_RESULT_SUCCESS );

                REQUIRE( dsl_player_delete(player_name.c_str()) == DSL_RESULT_SUCCESS );
                REQUIRE( dsl_component_delete_all() == DSL_RESULT_SUCCESS );
            }
        }
    }
}

SCENARIO( "An Image Render Player can Play, Pause, and Stop", "[player-api]" )
{
    GIVEN( "An empty list of Players" )
    {
        std::wstring player_name = L"player";
        std::wstring file_path = L"./test/streams/first-person-occurrence-438.jpeg";

        REQUIRE( dsl_player_list_size() == 0 );

        WHEN( "A new Player is created" )
        {
            REQUIRE( dsl_player_render_image_new(player_name.c_str(), file_path.c_str(),
                DSL_RENDER_TYPE_WINDOW, 10, 10, 75, 0) == DSL_RESULT_SUCCESS );

            THEN( "The list size and contents are updated correctly" )
            {
                REQUIRE( dsl_player_play(player_name.c_str()) == DSL_RESULT_SUCCESS );
                std::this_thread::sleep_for(TIME_TO_SLEEP_FOR);
                REQUIRE( dsl_player_pause(player_name.c_str()) == DSL_RESULT_SUCCESS );
                std::this_thread::sleep_for(TIME_TO_SLEEP_FOR);
                REQUIRE( dsl_player_play(player_name.c_str()) == DSL_RESULT_SUCCESS );
                std::this_thread::sleep_for(TIME_TO_SLEEP_FOR);
                REQUIRE( dsl_player_stop(player_name.c_str()) == DSL_RESULT_SUCCESS );

                REQUIRE( dsl_player_delete(player_name.c_str()) == DSL_RESULT_SUCCESS );
                REQUIRE( dsl_component_delete_all() == DSL_RESULT_SUCCESS );
            }
        }
    }
}

SCENARIO( "An Image Render Player's Attributes are updated correctly", "[player-api]" )
{
    GIVEN( "A new Image Render Player with Window Sink" )
    {
        std::wstring player_name = L"player";
        std::wstring file_path = L"./test/streams/first-person-occurrence-438.jpeg";
        uint offsetX(123);
        uint offsetY(123);
        uint retOffsetX(0);
        uint retOffsetY(0);
        uint zoom(75), retZoom(57);
        uint timeout(0), retTimeout(444);

        REQUIRE( dsl_player_list_size() == 0 );

        REQUIRE( dsl_player_render_image_new(player_name.c_str(), file_path.c_str(),
            DSL_RENDER_TYPE_WINDOW, offsetX, offsetY, zoom, timeout) == DSL_RESULT_SUCCESS );

        REQUIRE( dsl_player_render_offsets_get(player_name.c_str(),
            &retOffsetX, &retOffsetY) == DSL_RESULT_SUCCESS );
        REQUIRE( retOffsetX == offsetX );
        REQUIRE( retOffsetY == offsetY );
        REQUIRE( dsl_player_render_zoom_get(player_name.c_str(), &retZoom) == DSL_RESULT_SUCCESS );
        REQUIRE( retZoom == zoom );
        REQUIRE( dsl_player_render_image_timeout_get(player_name.c_str(),
            &retTimeout) == DSL_RESULT_SUCCESS );
        REQUIRE( retTimeout == timeout );

        WHEN( "The Player's Attributes are Set" )
        {
            uint newOffsetX(321), newOffsetY(321);
            REQUIRE( dsl_player_render_offsets_set(player_name.c_str(),
                newOffsetX, newOffsetY) == DSL_RESULT_SUCCESS );

            uint newZoom(543);
            REQUIRE( dsl_player_render_zoom_set(player_name.c_str(), newZoom) == DSL_RESULT_SUCCESS );

            uint newTimeout(101);
            REQUIRE( dsl_player_render_image_timeout_set(player_name.c_str(),
                newTimeout) == DSL_RESULT_SUCCESS );

            THEN( "The correct Attribute values are returned on Get" )
            {
                REQUIRE( dsl_player_render_offsets_get(player_name.c_str(),
                    &retOffsetX, &retOffsetY) == DSL_RESULT_SUCCESS );
                REQUIRE( retOffsetX == newOffsetX );
                REQUIRE( retOffsetY == newOffsetY );
                REQUIRE( dsl_player_render_zoom_get(player_name.c_str(), &retZoom) == DSL_RESULT_SUCCESS );
                REQUIRE( retZoom == newZoom );
                REQUIRE( dsl_player_render_image_timeout_get(player_name.c_str(),
                    &retTimeout) == DSL_RESULT_SUCCESS );
                REQUIRE( retTimeout == newTimeout );

                REQUIRE( dsl_player_delete(player_name.c_str()) == DSL_RESULT_SUCCESS );
                REQUIRE( dsl_component_delete_all() == DSL_RESULT_SUCCESS );
            }
        }
    }
}

SCENARIO( "A Video Render Player's Attributes are updated correctly", "[player-api]" )
{
    GIVEN( "A new Video Render Player with Window Sink" )
    {
        std::wstring player_name = L"player";
        std::wstring file_path = L"./test/streams/sample_1080p_h264.mp4";
        uint offsetX(123);
        uint offsetY(123);
        uint retOffsetX(0);
        uint retOffsetY(0);
        uint zoom(75), retZoom(57);
        boolean repeatEnabled(false), retRepeatEnabled(true);

        REQUIRE( dsl_player_list_size() == 0 );

        REQUIRE( dsl_player_render_video_new(player_name.c_str(), file_path.c_str(),
            DSL_RENDER_TYPE_WINDOW, offsetX, offsetY, zoom, repeatEnabled) == DSL_RESULT_SUCCESS );

        REQUIRE( dsl_player_render_offsets_get(player_name.c_str(),
            &retOffsetX, &retOffsetY) == DSL_RESULT_SUCCESS );
        REQUIRE( retOffsetX == offsetX );
        REQUIRE( retOffsetY == offsetY );
        REQUIRE( dsl_player_render_zoom_get(player_name.c_str(), &retZoom) == DSL_RESULT_SUCCESS );
        REQUIRE( retZoom == zoom );
        REQUIRE( dsl_player_render_video_repeat_enabled_get(player_name.c_str(),
            &retRepeatEnabled) == DSL_RESULT_SUCCESS );
        REQUIRE( retRepeatEnabled == repeatEnabled );

        WHEN( "The Player's Attributes are Set" )
        {
            std::wstring new_file_path = L"./test/streams/sample_1080p_h265.mp4";
            REQUIRE( dsl_player_render_file_path_set(player_name.c_str(),
                new_file_path.c_str()) == DSL_RESULT_SUCCESS );

            uint newOffsetX(321), newOffsetY(321);
            REQUIRE( dsl_player_render_offsets_set(player_name.c_str(),
                newOffsetX, newOffsetY) == DSL_RESULT_SUCCESS );

            uint newZoom(543);
            REQUIRE( dsl_player_render_zoom_set(player_name.c_str(), newZoom) == DSL_RESULT_SUCCESS );

            boolean newRepeatEnabled(true);
            REQUIRE( dsl_player_render_video_repeat_enabled_set(player_name.c_str(),
                newRepeatEnabled) == DSL_RESULT_SUCCESS );

            THEN( "The correct Attribute values are returned on Get" )
            {
                REQUIRE( dsl_player_render_offsets_get(player_name.c_str(),
                    &retOffsetX, &retOffsetY) == DSL_RESULT_SUCCESS );
                REQUIRE( retOffsetX == newOffsetX );
                REQUIRE( retOffsetY == newOffsetY );
                REQUIRE( dsl_player_render_zoom_get(player_name.c_str(), &retZoom) == DSL_RESULT_SUCCESS );
                REQUIRE( retZoom == newZoom );
                REQUIRE( dsl_player_render_video_repeat_enabled_get(player_name.c_str(),
                    &retRepeatEnabled) == DSL_RESULT_SUCCESS );
                REQUIRE( retRepeatEnabled == newRepeatEnabled );

                REQUIRE( dsl_player_delete(player_name.c_str()) == DSL_RESULT_SUCCESS );
            }
        }
    }
}

SCENARIO( "A Player's XWindow Handle can be Set/Get", "[player-api]" )
{
    GIVEN( "A new Player" )
    {
        std::wstring player_name = L"player";
        std::wstring file_path = L"./test/streams/sample_1080p_h264.mp4";
        uint offsetX(123);
        uint offsetY(123);
        uint zoom(75);
        boolean repeatEnabled(false);
        uint64_t handle(0);

        REQUIRE( dsl_player_render_video_new(player_name.c_str(), file_path.c_str(),
            DSL_RENDER_TYPE_WINDOW, offsetX, offsetY, zoom, repeatEnabled) == DSL_RESULT_SUCCESS );

        REQUIRE( dsl_player_xwindow_handle_get(player_name.c_str(), &handle) == DSL_RESULT_SUCCESS );

        // must be initialized to 0
        REQUIRE( handle == 0 );

        WHEN( "The Player's XWindow Handle is updated" )
        {
            handle = 0x1234567812345678;
            REQUIRE( dsl_player_xwindow_handle_set(player_name.c_str(), handle) == DSL_RESULT_SUCCESS );

            THEN( "The new handle value is returned on get" )
            {
                uint64_t newHandle(0);
                REQUIRE( dsl_player_xwindow_handle_get(player_name.c_str(), &newHandle) == DSL_RESULT_SUCCESS );
                REQUIRE( handle == newHandle );

                REQUIRE( dsl_player_delete(player_name.c_str()) == DSL_RESULT_SUCCESS );
            }
        }
    }
}

SCENARIO( "The Player API checks for NULL input parameters", "[player-api]" )
{
    GIVEN( "An empty list of Players" )
    {
        std::wstring player_name(L"player");
        std::wstring source_name(L"file-source");
        std::wstring file_path(L"./test/streams/sample_1080p_h264.mp4");
        std::wstring sink_name(L"window-sink");
        uint offsetX(0);
        uint offsetY(0);
        uint sinkW(1280);
        uint sinkH(720);
        uint timeout(0);
        uint zoom(100);
        boolean repeat_enabled(0);

        REQUIRE( dsl_source_file_new(source_name.c_str(), file_path.c_str(),
            false) == DSL_RESULT_SUCCESS );
        REQUIRE( dsl_sink_window_new(sink_name.c_str(),
            offsetX, offsetY, sinkW, sinkH) == DSL_RESULT_SUCCESS );
        REQUIRE( dsl_player_list_size() == 0 );

        WHEN( "When NULL pointers are used as input" )
        {
            THEN( "The API returns DSL_RESULT_INVALID_INPUT_PARAM in all cases" )
            {
                REQUIRE( dsl_player_new(NULL, source_name.c_str(),
                    sink_name.c_str()) == DSL_RESULT_INVALID_INPUT_PARAM );
                REQUIRE( dsl_player_new(player_name.c_str(), NULL,
                    sink_name.c_str()) == DSL_RESULT_INVALID_INPUT_PARAM );
                REQUIRE( dsl_player_new(player_name.c_str(), source_name.c_str(),
                    NULL) == DSL_RESULT_INVALID_INPUT_PARAM );

                REQUIRE( dsl_player_render_reset(NULL) == DSL_RESULT_INVALID_INPUT_PARAM );

                REQUIRE( dsl_player_render_video_new(NULL, NULL, DSL_RENDER_TYPE_WINDOW,
                    offsetX, offsetY, zoom, repeat_enabled) == DSL_RESULT_INVALID_INPUT_PARAM );
                REQUIRE( dsl_player_render_image_new(NULL, NULL, DSL_RENDER_TYPE_WINDOW,
                    offsetX, offsetY, zoom, timeout) == DSL_RESULT_INVALID_INPUT_PARAM );

                REQUIRE( dsl_sink_render_reset(NULL) == DSL_RESULT_INVALID_INPUT_PARAM );

                REQUIRE( dsl_player_play(NULL) == DSL_RESULT_INVALID_INPUT_PARAM );
                REQUIRE( dsl_player_pause(NULL) == DSL_RESULT_INVALID_INPUT_PARAM );
                REQUIRE( dsl_player_stop(NULL) == DSL_RESULT_INVALID_INPUT_PARAM );
                REQUIRE( dsl_player_state_get(NULL, NULL) == DSL_RESULT_INVALID_INPUT_PARAM );

                REQUIRE( dsl_player_list_size() == 0 );
                REQUIRE( dsl_component_delete_all() == DSL_RESULT_SUCCESS );
            }
        }
    }
}
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BUFSIZE 1024

char buf[BUFSIZE];

int main(void)
{
    const char *DELIMITER = " ";

    /* Read one line of card values per round. The original passed fgets()
       straight into strtok(), so a NULL return at EOF re-tokenized the old
       buffer; check for NULL first instead. */
    while (fgets(buf, BUFSIZE, stdin) != NULL) {
        char *p = strtok(buf, DELIMITER);
        if (p == NULL || *p == '0')
            break;

        int res = 0, one = 0, n;
        do {
            n = atoi(p);
            res += (n < 10 ? n : 10);  /* face cards count as 10 */
            if (n == 1)
                one++;                 /* remember aces for later promotion */
        } while ((p = strtok(NULL, DELIMITER)) != NULL);

        /* Promote aces from 1 to 11 while it doesn't bust the hand. */
        while (one-- && res + 10 <= 21)
            res += 10;
        if (res > 21)
            res = 0;                   /* a bust scores zero */
        printf("%d\n", res);
    }
    return 0;
}
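A brief illustration of the scorer above; the input format (one space-separated hand per line, terminated by a line starting with 0) is inferred from the code, not documented in the source:

/*
 * Example session:
 *
 *   input:  1 10      -> prints 21  (the lone ace is promoted from 1 to 11)
 *   input:  10 10 5   -> prints 0   (25 busts, so the hand scores zero)
 *   input:  0         -> ends the program
 */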
import java.util.*;

// Reads an n-by-m character grid and prints the row and column (1-based) of
// the middle cell of the contiguous block of 'B' characters.
public class test {
    public static void main(String[] args) {
        Scanner in = new Scanner(System.in);
        int n = in.nextInt();
        int m = in.nextInt();
        int mdlN = 0;
        int mdlM = 0;
        boolean flag = false;
        boolean flag2 = false;
        int i = 1;
        while (i < n + 1) {
            String s = in.next();
            int b = s.indexOf('B') + 1;
            if (b > 0) {
                if (!flag) {
                    // First row containing a 'B': locate the block's middle column.
                    mdlM = b;
                    mdlN = i;
                    flag = true;
                    char[] charArray = s.toCharArray();
                    int k = b + 1;
                    while (k < m) {
                        if (charArray[k] != 'B') {
                            mdlM = (k - 1 == mdlM) ? mdlM : (((k - 1) - mdlM) / 2) + mdlM;
                            k = m;
                        } else if (k + 2 > m - 1) {
                            mdlM = (((k + 1) - mdlM) / 2) + mdlM;
                        }
                        k = k + 2;
                    }
                }
                i++;
            } else {
                if (flag) {
                    // First row after the block: locate the middle row.
                    mdlN = (i - 1 == mdlN) ? mdlN : ((i - 1) - mdlN) / 2 + mdlN;
                    flag2 = true;
                    break;
                } else {
                    i++;
                }
            }
        }
        if (!flag2) {
            // The block runs to the bottom edge of the grid.
            mdlN = (n - mdlN) / 2 + mdlN;
        }
        System.out.print(mdlN + " " + mdlM);
    }
}
/**
 * POSIX pthread version of ThreadMutex.
 */
class ThreadMutex_pthread : public ThreadMutex {
private:
    pthread_mutex_t mutex;
    pthread_cond_t condition;
    pthread_mutexattr_t attr;

public:
    ThreadMutex_pthread()
    {
        pthread_mutexattr_init(&this->attr);
        pthread_mutexattr_settype(&this->attr, PTHREAD_MUTEX_ERRORCHECK);
        pthread_mutex_init(&this->mutex, &this->attr);
        pthread_cond_init(&this->condition, NULL);
    }

    ~ThreadMutex_pthread()
    {
        int err = pthread_cond_destroy(&this->condition);
        assert(err != EBUSY);
        err = pthread_mutex_destroy(&this->mutex);
        assert(err != EBUSY);
    }

    void BeginCritical()
    {
        int err = pthread_mutex_lock(&this->mutex);
        assert(err == 0);
    }

    void EndCritical()
    {
        int err = pthread_mutex_unlock(&this->mutex);
        assert(err == 0);
    }

    void WaitForSignal()
    {
        int err = pthread_cond_wait(&this->condition, &this->mutex);
        assert(err == 0);
    }

    void SendSignal()
    {
        int err = pthread_cond_signal(&this->condition);
        assert(err == 0);
    }
};
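A minimal sketch of the intended condition-variable pattern, assuming the ThreadMutex base class from the surrounding code is available; the ready flag and both functions are hypothetical:

ThreadMutex_pthread mutex;
bool ready = false;

void consumer()
{
    mutex.BeginCritical();
    while (!ready)              // loop guards against spurious wakeups
        mutex.WaitForSignal();  // atomically releases and reacquires the lock
    mutex.EndCritical();
}

void producer()
{
    mutex.BeginCritical();
    ready = true;
    mutex.SendSignal();         // wake one waiter
    mutex.EndCritical();
}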
package smooch

import (
    "bytes"
    "io/ioutil"
    "mime"
    "mime/multipart"
    "net/http"
    "net/http/httptest"
    "strings"
    "testing"
    "time"

    "github.com/stretchr/testify/assert"
)

const (
    expectedAuthorizationHeader = "<KEY>"
)

var (
    sampleResponse = `
    {
        "message": {
            "_id": "55c8c1498590aa1900b9b9b1",
            "authorId": "c<PASSWORD>",
            "role": "appMaker",
            "type": "text",
            "name": "Steve",
            "text": "Just put some vinegar on it",
            "avatarUrl": "https://www.gravatar.com/image.jpg",
            "received": 1439220041.586
        },
        "extraMessages": [
            {
                "_id": "507f1f77bcf86cd799439011",
                "authorId": "c<PASSWORD>",
                "role": "appMaker",
                "type": "image",
                "name": "Steve",
                "text": "Check out this image!",
                "mediaUrl": "http://example.org/image.jpg",
                "actions": [
                    {
                        "text": "More info",
                        "type": "link",
                        "uri": "http://example.org"
                    }
                ]
            }
        ],
        "conversation": {
            "_id": "df0ebe56cbeab98589b8bfa7",
            "unreadCount": 0
        }
    }`

    sampleWebhookData = `
    {
        "trigger": "message:appUser",
        "app": {
            "_id": "5698edbf2a43bd081be982f1"
        },
        "messages": [
            {
                "_id": "55c8c1498590aa1900b9b9b1",
                "type": "text",
                "text": "Hi! Do you have time to chat?",
                "role": "appUser",
                "authorId": "<PASSWORD>",
                "name": "Steve",
                "received": 1444348338.704,
                "source": {
                    "type": "messenger"
                }
            }
        ],
        "appUser": {
            "_id": "c7f6e6d6c3a637261bd9656f",
            "userId": "<EMAIL>",
            "conversationStarted": true,
            "surname": "Steve",
            "givenName": "Bob",
            "signedUpAt": "2018-04-02T14:45:46.505Z",
            "properties": {
                "favoriteFood": "prizza"
            }
        },
        "client": {
            "_id": "5c9d2f34a1d3a2504bc89511",
            "lastSeen": "2019-04-05T18:23:20.791Z",
            "platform": "web",
            "id": "20b2be30cf7e4152865f066930cbb5b2",
            "info": {
                "currentTitle": "Conversation Demo",
                "currentUrl": "https://examples.com/awesomechat",
                "browserLanguage": "en-US",
                "referrer": "",
                "userAgent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36",
                "URL": "examples.com",
                "sdkVersion": "4.17.12",
                "vendor": "smooch"
            },
            "raw": {
                "currentTitle": "Conversation Demo",
                "currentUrl": "https://examples.com/awesomechat",
                "browserLanguage": "en-US",
                "referrer": "",
                "userAgent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36",
                "URL": "examples.com",
                "sdkVersion": "4.17.12",
                "vendor": "smooch"
            },
            "active": true,
            "primary": true,
            "integrationId": "5c3640f8cd3fa5850931a954"
        },
        "conversation": {
            "_id": "105e47578be874292d365ee8"
        },
        "version": "v1.1"
    }`

    sampleGetUserJson = `
    {
        "appUser": {
            "_id": "7494535bff5cef41a15be74d",
            "userId": "<EMAIL>",
            "givenName": "Steve",
            "signedUpAt": "2019-01-14T18:55:12.515Z",
            "hasPaymentInfo": false,
            "conversationStarted": false,
            "clients": [
                {
                    "_id": "5c93cb748f63db54ff3b51dd",
                    "lastSeen": "2019-01-14T16:55:59.364Z",
                    "platform": "ios",
                    "id": "F272EB80-D512-4C19-9AC0-BD259DAEAD91",
                    "deviceId": "F272EB80-D512-4C19-9AC0-BD259DAEAD91",
                    "pushNotificationToken": "<<PASSWORD>>",
                    "appVersion": "1.0",
                    "info": {
                        "appId": "com.rp.ShellApp",
                        "carrier": "Rogers",
                        "buildNumber": "1.0",
                        "devicePlatform": "iPhone8,4",
                        "installer": "Dev",
                        "sdkVersion": "6.11.2",
                        "osVersion": "12.1.2",
                        "appName": "ShellApp",
                        "os": "iOS",
                        "vendor": "smooch"
                    },
                    "raw": {
                        "appId": "com.rp.ShellApp",
                        "carrier": "Rogers",
                        "buildNumber": "1.0",
                        "devicePlatform": "iPhone8,4",
                        "installer": "Dev",
                        "sdkVersion": "6.11.2",
                        "osVersion": "12.1.2",
                        "appName": "ShellApp",
                        "os": "iOS",
                        "vendor": "smooch"
                    },
                    "active": true,
                    "primary": false,
                    "integrationId": "599ad41e49db6e243ad77d2f"
                }
            ],
            "pendingClients": [],
            "properties": {
                "favoriteFood": "prizza"
            }
        }
    }`

    sampleUploadAttachmentJson = `
    {
        "mediaUrl": "https://media.smooch.io/conversation/c7f6e6d6c3a637261bd9656f/a77caae4cbbd263a0938eba00016b7c8/test.png",
        "mediaType": "image/png"
    }`
)

type RoundTripFunc func(req *http.Request) *http.Response

func (f RoundTripFunc) RoundTrip(req *http.Request) (*http.Response, error) {
    return f(req), nil
}

func NewTestClient(fn RoundTripFunc) *http.Client {
    return &http.Client{
        Transport: fn,
    }
}

func TestGenerateJWT(t *testing.T) {
    secret := "a random, long, sequence of characters"

    token, err := GenerateJWT("app", "vienas", secret)
    assert.NoError(t, err)
    assert.NotEmpty(t, token)
    assert.Equal(t,
        "<KEY>",
        token,
    )
}

func TestSendOKResponse(t *testing.T) {
    fn := func(req *http.Request) *http.Response {
        assert.Equal(t, http.MethodPost, req.Method)
        assert.Equal(t, "application/json", req.Header.Get(contentTypeHeaderKey))
        assert.Equal(t, expectedAuthorizationHeader, req.Header.Get(authorizationHeaderKey))
        return &http.Response{
            StatusCode: http.StatusCreated,
            Body:       ioutil.NopCloser(bytes.NewReader([]byte(sampleResponse))),
        }
    }

    sc, err := New(Options{
        VerifySecret: "very-secure-test-secret",
        HttpClient:   NewTestClient(fn),
    })
    assert.NoError(t, err)

    message := &Message{}
    response, err := sc.Send("", message)
    assert.Nil(t, response)
    assert.Error(t, err)
    assert.EqualError(t, err, ErrUserIDEmpty.Error())

    response, err = sc.Send("TestUser", nil)
    assert.Nil(t, response)
    assert.Error(t, err)
    assert.EqualError(t, err, ErrMessageNil.Error())

    response, err = sc.Send("TestUser", message)
    assert.Nil(t, response)
    assert.Error(t, err)
    assert.EqualError(t, err, ErrMessageRoleEmpty.Error())

    message = &Message{
        Role: RoleAppUser,
    }
    response, err = sc.Send("TestUser", message)
    assert.Nil(t, response)
    assert.Error(t, err)
    assert.EqualError(t, err, ErrMessageTypeEmpty.Error())

    message = &Message{
        Role: RoleAppUser,
        Type: MessageTypeText,
    }
    response, err = sc.Send("TestUser", message)
    assert.NotNil(t, response)
    assert.NoError(t, err)
    assert.NotNil(t, response.Message)
    assert.Equal(t, "55c8c1498590aa1900b9b9b1", response.Message.ID)
    assert.Equal(t, 1, len(response.ExtraMessages))
    assert.Equal(t, "507f1f77bcf86cd799439011", response.ExtraMessages[0].ID)
    assert.Equal(t, "df0ebe56cbeab98589b8bfa7", response.Conversation.ID)
    assert.Equal(t, 0, response.Conversation.UnreadCount)
}

func TestSendErrorResponse(t *testing.T) {
    errorResponseJson := `
    {
        "error": {
            "code": "unauthorized",
            "description": "Authorization is required"
        }
    }`

    fn := func(req *http.Request) *http.Response {
        assert.Equal(t, http.MethodPost, req.Method)
        assert.Equal(t, "application/json", req.Header.Get(contentTypeHeaderKey))
        assert.Equal(t, expectedAuthorizationHeader, req.Header.Get(authorizationHeaderKey))
        return &http.Response{
            StatusCode: http.StatusUnauthorized,
            Body:       ioutil.NopCloser(bytes.NewReader([]byte(errorResponseJson))),
        }
    }

    sc, err := New(Options{
        VerifySecret: "very-secure-test-secret",
        HttpClient:   NewTestClient(fn),
    })
    assert.NoError(t, err)

    message := &Message{
        Role: RoleAppUser,
        Type: MessageTypeText,
    }
    response, err := sc.Send("TestUser", message)
    assert.Nil(t, response)
    assert.Error(t, err)

    smoochErr := err.(*SmoochError)
    assert.Equal(t, http.StatusUnauthorized, smoochErr.Code())
    assert.Equal(t,
        "StatusCode: 401 Code: unauthorized Message: Authorization is required",
        smoochErr.Error(),
    )
}

func TestHandlerOK(t *testing.T) {
    sc, err := New(Options{
        VerifySecret: "very-secure-test-secret",
    })
    assert.NoError(t, err)

    handlerInvokeCounter := 0
    sc.AddWebhookEventHandler(func(payload *Payload) {
        handlerInvokeCounter++
    })
    sc.AddWebhookEventHandler(func(payload *Payload) {
        handlerInvokeCounter++
    })

    mockData := bytes.NewReader([]byte(sampleWebhookData))
    req := httptest.NewRequest(http.MethodPost, "http://example.com/foo", mockData)
    req.Header.Set("X-Api-Key", "very-secure-test-secret")
    w := httptest.NewRecorder()

    handler := sc.Handler()
    handler.ServeHTTP(w, req)

    resp := w.Result()
    body, err := ioutil.ReadAll(resp.Body)
    assert.NoError(t, err)
    assert.Equal(t, resp.StatusCode, http.StatusOK)
    assert.Equal(t, "application/json", resp.Header.Get("Content-Type"))
    assert.Equal(t, "", string(body))
    assert.Equal(t, 2, handlerInvokeCounter)
}

func TestVerifyRequest(t *testing.T) {
    sc, err := New(Options{
        VerifySecret: "very-secure-test-secret",
    })
    r := &http.Request{}
    assert.NoError(t, err)
    assert.False(t, sc.VerifyRequest(r))

    r = &http.Request{
        Header: http.Header{},
    }
    r.Header.Set("X-Api-Key", "very-secure-test-secret")
    assert.NoError(t, err)
    assert.True(t, sc.VerifyRequest(r))

    sc, err = New(Options{
        VerifySecret: "very-secure-test-secret",
    })
    assert.NoError(t, err)

    headers := http.Header{}
    headers.Set("X-Api-Key", "very-secure-test-secret-wrong")
    r = &http.Request{
        Header: headers,
    }
    assert.NoError(t, err)
    assert.False(t, sc.VerifyRequest(r))

    headers = http.Header{}
    headers.Set("X-Api-Key", "very-secure-test-secret")
    r = &http.Request{
        Header: headers,
    }
    assert.NoError(t, err)
    assert.True(t, sc.VerifyRequest(r))
}

func TestGetAppUser(t *testing.T) {
    fn := func(req *http.Request) *http.Response {
        assert.Equal(t, http.MethodGet, req.Method)
        assert.Equal(t, "application/json", req.Header.Get(contentTypeHeaderKey))
        assert.Equal(t, expectedAuthorizationHeader, req.Header.Get(authorizationHeaderKey))
        return &http.Response{
            StatusCode: http.StatusOK,
            Body:       ioutil.NopCloser(bytes.NewReader([]byte(sampleGetUserJson))),
        }
    }

    sc, err := New(Options{
        VerifySecret: "very-secure-test-secret",
        HttpClient:   NewTestClient(fn),
    })
    assert.NoError(t, err)

    appUser, err := sc.GetAppUser("123")
    assert.NotNil(t, appUser)
    assert.NoError(t, err)
    assert.Equal(t, "7494535bff5cef41a15be74d", appUser.ID)
    assert.Equal(t, "<EMAIL>", appUser.UserID)
    assert.Equal(t, "Steve", appUser.GivenName)
    assert.Equal(t, "2019-01-14T18:55:12Z", appUser.SignedUpAt.Format(time.RFC3339))
    assert.False(t, appUser.ConversationStarted)
    assert.Equal(t, 1, len(appUser.Clients))
    assert.Equal(t, "5c93cb748f63db54ff3b51dd", appUser.Clients[0].ID)
    assert.Equal(t, "2019-01-14T16:55:59Z", appUser.Clients[0].LastSeen.Format(time.RFC3339))
    assert.Equal(t, 0, len(appUser.PendingClients))
}

func TestUploadAttachment(t *testing.T) {
    fn := func(req *http.Request) *http.Response {
        assert.Equal(t, http.MethodPost, req.Method)
        assert.True(t, strings.HasPrefix(req.Header.Get(contentTypeHeaderKey), "multipart/form-data; boundary="))
        assert.Equal(t, expectedAuthorizationHeader, req.Header.Get(authorizationHeaderKey))
        assert.Equal(t, "https://api.smooch.io/v1.1/apps/attachments?access=public", req.URL.String())
        assert.Equal(t, "public", req.URL.Query().Get("access"))
        assert.Equal(t, "", req.URL.Query().Get("for"))
        assert.Equal(t, "", req.URL.Query().Get("appUserId"))
        assert.Equal(t, "", req.URL.Query().Get("userId"))

        mediaType, params, err := mime.ParseMediaType(req.Header.Get("Content-Type"))
        assert.NoError(t, err)
        assert.Equal(t, "multipart/form-data", mediaType)
        assert.Contains(t, params, "boundary")

        mr := multipart.NewReader(req.Body, params["boundary"])
        form, err := mr.ReadForm(20000000)
        assert.NotNil(t, form)
        assert.NoError(t, err)
        assert.Equal(t, "image/png", form.Value["type"][0])
        assert.Equal(t, "fixtures/smooch.png", form.File["source"][0].Filename)
        assert.Equal(t, int64(5834), form.File["source"][0].Size)

        return &http.Response{
            StatusCode: http.StatusOK,
            Body:       ioutil.NopCloser(bytes.NewReader([]byte(sampleUploadAttachmentJson))),
        }
    }

    sc, err := New(Options{
        VerifySecret: "very-secure-test-secret",
        HttpClient:   NewTestClient(fn),
    })
    assert.NoError(t, err)

    r, err := sc.UploadFileAttachment("fixtures/smooch.png", NewAttachmentUpload("image/png"))
    assert.NotNil(t, r)
    assert.NoError(t, err)
    assert.Equal(t, "image/png", r.MediaType)
    assert.Equal(t,
        "https://media.smooch.io/conversation/c7f6e6d6c3a637261bd9656f/a77caae4cbbd263a0938eba00016b7c8/test.png",
        r.MediaURL,
    )

    r, err = sc.UploadFileAttachment("fixtures/smooch-not-exists.png", NewAttachmentUpload("image/png"))
    assert.Nil(t, r)
    assert.Error(t, err)
}

func TestDeleteAttachment(t *testing.T) {
    fn := func(req *http.Request) *http.Response {
        assert.Equal(t, http.MethodPost, req.Method)
        assert.Equal(t, "application/json", req.Header.Get(contentTypeHeaderKey))
        assert.Equal(t, expectedAuthorizationHeader, req.Header.Get(authorizationHeaderKey))
        return &http.Response{
            StatusCode: http.StatusOK,
            Body:       ioutil.NopCloser(bytes.NewReader([]byte("{}"))),
        }
    }

    sc, err := New(Options{
        VerifySecret: "very-secure-test-secret",
        HttpClient:   NewTestClient(fn),
    })
    assert.NoError(t, err)

    err = sc.DeleteAttachment(&Attachment{
        MediaURL:  "https://media.smooch.io/conversation/c7f6e6d6c3a637261bd9656f/a77caae4cbbd263a0938eba00016b7c8/test.png",
        MediaType: "",
    })
    assert.NoError(t, err)
}
COMSTOCK TOWNSHIP — Seeking leads in the case, the sheriff's office has released illustrations of a woman's face reconstructed from skeletal remains found in Comstock Township in late April.

Police believe the remains are those of a white woman, 35 to 55 years old, who died as a result of "blunt-force trauma."

Officials have no leads on the identity of the woman and hope the illustrations, created by the Michigan State University anthropology department, will result in a break in the case, according to Sgt. Bill Sparrow of the sheriff's office.

The remains were found in the 8400 block of East ML Avenue. The woman is additionally described as being between 5 foot, 4 inches and 5 foot, 8 inches tall.

Several area women are missing, but authorities have ruled out Tammie Briley, Mary Lands and Venus Stewart as being this unidentified woman.

The illustrations show the woman's reconstructed face framed in several different hairstyles.

Police ask anyone who may have information to contact the sheriff's office at 269-383-8723 or Silent Observer at 269-343-2100.

Contact Paula M. Davis at [email protected] or 269-388-8583. You can also follow her on Twitter.
#include <stdint.h>
#include "modules.h"

#ifndef QUBOBUS_POWER_H
#define QUBOBUS_POWER_H

enum {
    M_ID_POWER_STATUS = M_ID_OFFSET_POWER,
    M_ID_POWER_RAIL_ENABLE,
    M_ID_POWER_RAIL_DISABLE,
    M_ID_POWER_MONITOR_ENABLE,
    M_ID_POWER_MONITOR_DISABLE,
    M_ID_POWER_MONITOR_SET_CONFIG,
    M_ID_POWER_MONITOR_GET_CONFIG
};

enum {
    E_ID_POWER_UNREACHABLE = M_ID_OFFSET_POWER,
};

enum {
    RAIL_ID_3V,
    RAIL_ID_5V,
    RAIL_ID_9V,
    RAIL_ID_12V,
    RAIL_ID_DVL,
    RAIL_ID_16V,
    RAIL_ID_BATT_0,
    RAIL_ID_BATT_1,
    RAIL_ID_SHORE
};

#define IS_POWER_RAIL_ID(X) ((RAIL_ID_3V <= (X)) && ((X) <= RAIL_ID_SHORE))

struct Power_Rail {
    uint8_t rail_id;
};

struct Power_Status {
    float voltage;
    float current;
    uint8_t rail_id;
    uint8_t warning_level;
};

struct Power_Monitor_Config_Request {
    uint8_t rail_id;
    uint8_t warning_level;
};

struct Power_Monitor_Config {
    float voltage[2];
    float current[2];
    uint8_t rail_id;
    uint8_t warning_level;
};

extern const Transaction tPowerStatus;
extern const Transaction tPowerRailEnable;
extern const Transaction tPowerRailDisable;
extern const Transaction tPowerMonitorEnable;
extern const Transaction tPowerMonitorDisable;
extern const Transaction tPowerMonitorSetConfig;
extern const Transaction tPowerMonitorGetConfig;

extern const Error ePowerUnreachable;

#endif
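A small illustration of the structs and the rail-ID check macro defined above, assuming the Qubobus headers are available on the include path (the path and all values below are made up):

#include <stdio.h>
#include "qubobus/power.h"  /* hypothetical include path for the header above */

int main(void) {
    struct Power_Rail rail = { RAIL_ID_5V };
    struct Power_Status status = { 5.02f, 1.30f, RAIL_ID_5V, 0 };

    /* Validate the rail ID before acting on a status message. */
    if (IS_POWER_RAIL_ID(rail.rail_id)) {
        printf("rail %d: %.2f V, %.2f A\n",
               (int)status.rail_id, status.voltage, status.current);
    }
    return 0;
}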
package com.direwolf20.buildinggadgets.items;

import com.direwolf20.buildinggadgets.Config;
import com.direwolf20.buildinggadgets.items.ItemCaps.CapabilityProviderEnergy;
import net.minecraft.client.renderer.block.model.ModelResourceLocation;
import net.minecraft.init.Items;
import net.minecraft.item.Item;
import net.minecraft.item.ItemStack;
import net.minecraft.nbt.NBTTagCompound;
import net.minecraft.util.math.MathHelper;
import net.minecraftforge.client.model.ModelLoader;
import net.minecraftforge.common.capabilities.ICapabilityProvider;
import net.minecraftforge.energy.IEnergyStorage;
import net.minecraftforge.fml.relauncher.Side;
import net.minecraftforge.fml.relauncher.SideOnly;

import javax.annotation.Nullable;

public class GenericGadget extends Item {

    @SideOnly(Side.CLIENT)
    public void initModel() {
        ModelLoader.setCustomModelResourceLocation(this, 0, new ModelResourceLocation(getRegistryName(), "inventory"));
    }

    @Override
    @Nullable
    public ICapabilityProvider initCapabilities(ItemStack stack, @Nullable NBTTagCompound tag) {
        if (Config.poweredByFE) {
            return new CapabilityProviderEnergy(stack, Config.energyMax);
        }
        return null;
    }

    @Override
    public boolean isDamageable() {
        return true;
    }

    @Override
    public boolean isRepairable() {
        return false;
    }

    @Override
    public double getDurabilityForDisplay(ItemStack stack) {
        if (Config.poweredByFE) {
            IEnergyStorage energy = CapabilityProviderEnergy.getCap(stack);
            return 1D - ((double) energy.getEnergyStored() / (double) energy.getMaxEnergyStored());
        }
        //return (double)stack.getItemDamage() / (double)stack.getMaxDamage();
        return super.getDurabilityForDisplay(stack);
    }

    @Override
    public int getRGBDurabilityForDisplay(ItemStack stack) {
        if (Config.poweredByFE) {
            IEnergyStorage energy = CapabilityProviderEnergy.getCap(stack);
            return MathHelper.hsvToRGB(Math.max(0.0F, (float) energy.getEnergyStored() / (float) energy.getMaxEnergyStored()) / 3.0F, 1.0F, 1.0F);
        }
        //return MathHelper.hsvToRGB(Math.max(0.0F, (float) (1.0F - getDurabilityForDisplay(stack))) / 3.0F, 1.0F, 1.0F);
        return super.getRGBDurabilityForDisplay(stack);
    }

    @Override
    public boolean isDamaged(ItemStack stack) {
        if (Config.poweredByFE) {
            IEnergyStorage energy = CapabilityProviderEnergy.getCap(stack);
            return energy.getEnergyStored() != energy.getMaxEnergyStored();
        }
        //return (stack.getItemDamage() > 0);
        return super.isDamaged(stack);
    }

    @Override
    public boolean showDurabilityBar(ItemStack stack) {
        if (Config.poweredByFE) {
            IEnergyStorage energy = CapabilityProviderEnergy.getCap(stack);
            return energy.getEnergyStored() != energy.getMaxEnergyStored();
        }
        //return stack.isItemDamaged();
        return super.showDurabilityBar(stack);
    }

    @Override
    public boolean getIsRepairable(ItemStack toRepair, ItemStack repair) {
        if (Config.poweredByFE) {
            return false;
        }
        return repair.getItem() == Items.DIAMOND;
    }
}
/**
 * Classy class class.
 */
@RunWith(EsRunner.class)
@UseModules({ TestIndexModule.class })
public class EsIndexMappingMigrationPluginTest extends BaseIT {

    @Inject
    public EntityIndexFactory eif;

    @Inject
    public MigrationInfoSerialization serialization;

    @Inject
    EsProvider provider;

    @Test
    public void runMigration() {
        MigrationInfoSerialization serialization = Mockito.mock(MigrationInfoSerialization.class);
        Mockito.when(serialization.getVersion(Mockito.any())).thenReturn(0);

        EsIndexMappingMigrationPlugin plugin = new EsIndexMappingMigrationPlugin(serialization, provider);

        TestProgressObserver progressObserver = new TestProgressObserver();
        plugin.run(progressObserver);

        // check for failures
        assertFalse("Progress observer should not have failed", progressObserver.isFailed());

        // After completion, updates could have size 0 or more (0 if no indices are
        // present), so testing the observer's 'updates' size doesn't help.
        // TODO: update test to ensure an index is present before running.
    }
}
// Record SCT origin enum. This metric measures the popularity
// of the various channels of providing SCTs for a certificate.
void LogSCTOriginToUMA(ct::SignedCertificateTimestamp::Origin origin) {
  UMA_HISTOGRAM_ENUMERATION("Net.CertificateTransparency.SCTOrigin",
                            origin,
                            ct::SignedCertificateTimestamp::SCT_ORIGIN_MAX);
}
def scale_styles(self, amount):
    """Scale the report's draw and paragraph styles by the given factor."""
    style_sheet = self.doc.get_style_sheet()

    graph_style = style_sheet.get_draw_style("CG2-fam-box")
    graph_style.set_shadow(graph_style.get_shadow(), 0)
    graph_style.set_line_width(graph_style.get_line_width() * amount)
    style_sheet.add_draw_style("CG2-fam-box", graph_style)

    graph_style = style_sheet.get_draw_style("CG2-box")
    graph_style.set_shadow(graph_style.get_shadow(),
                           self.canvas.report_opts.box_shadow * amount)
    graph_style.set_line_width(graph_style.get_line_width() * amount)
    style_sheet.add_draw_style("CG2-box", graph_style)

    graph_style = style_sheet.get_draw_style("CG2b-box")
    graph_style.set_shadow(graph_style.get_shadow(),
                           self.canvas.report_opts.box_shadow * amount)
    graph_style.set_line_width(graph_style.get_line_width() * amount)
    style_sheet.add_draw_style("CG2b-box", graph_style)

    graph_style = style_sheet.get_draw_style("CG2-note-box")
    graph_style.set_shadow(graph_style.get_shadow(), 0)
    graph_style.set_line_width(graph_style.get_line_width() * amount)
    style_sheet.add_draw_style("CG2-note-box", graph_style)

    para_style = style_sheet.get_paragraph_style("CG2-Title")
    font = para_style.get_font()
    font.set_size(font.get_size() * amount)
    para_style.set_font(font)
    style_sheet.add_paragraph_style("CG2-Title", para_style)

    para_style = style_sheet.get_paragraph_style("CG2-Normal")
    font = para_style.get_font()
    font.set_size(font.get_size() * amount)
    para_style.set_font(font)
    style_sheet.add_paragraph_style("CG2-Normal", para_style)

    para_style = style_sheet.get_paragraph_style("CG2-Bold")
    font = para_style.get_font()
    font.set_bold(True)
    font.set_size(font.get_size() * amount)
    para_style.set_font(font)
    style_sheet.add_paragraph_style("CG2-Bold", para_style)

    para_style = style_sheet.get_paragraph_style("CG2-Note")
    font = para_style.get_font()
    font.set_size(font.get_size() * amount)
    para_style.set_font(font)
    style_sheet.add_paragraph_style("CG2-Note", para_style)

    self.doc.set_style_sheet(style_sheet)
// flowable-manager-ui/src/components/GlobalHeader/NoticeIconView.tsx
import React, { Component } from 'react';
import { Tag, message } from 'antd';
import { connect } from 'dva';
import groupBy from 'lodash/groupBy';
import moment from 'moment';
import { NoticeItem } from '@/models/global';
import NoticeIcon from '../NoticeIcon';
import { CurrentUser } from '@/models/user';
import { ConnectProps, ConnectState } from '@/models/connect';
import styles from './index.less';

export interface GlobalHeaderRightProps extends ConnectProps {
  notices?: NoticeItem[];
  currentUser?: CurrentUser;
  fetchingNotices?: boolean;
  onNoticeVisibleChange?: (visible: boolean) => void;
  onNoticeClear?: (tabName?: string) => void;
}

class GlobalHeaderRight extends Component<GlobalHeaderRightProps> {
  componentDidMount() {
    const { dispatch } = this.props;
    if (dispatch) {
      dispatch({
        type: 'global/fetchNotices',
      });
    }
  }

  changeReadState = (clickedItem: NoticeItem): void => {
    const { id } = clickedItem;
    const { dispatch } = this.props;
    if (dispatch) {
      dispatch({
        type: 'global/changeNoticeReadState',
        payload: id,
      });
    }
  };

  handleNoticeClear = (title: string, key: string) => {
    const { dispatch } = this.props;
    message.success(`${'Cleared'} ${title}`);
    if (dispatch) {
      dispatch({
        type: 'global/clearNotices',
        payload: key,
      });
    }
  };

  getNoticeData = (): {
    [key: string]: NoticeItem[];
  } => {
    const { notices = [] } = this.props;
    if (notices.length === 0) {
      return {};
    }
    const newNotices = notices.map(notice => {
      const newNotice = { ...notice };
      if (newNotice.datetime) {
        newNotice.datetime = moment(notice.datetime as string).fromNow();
      }
      if (newNotice.id) {
        newNotice.key = newNotice.id;
      }
      if (newNotice.extra && newNotice.status) {
        const color = {
          todo: '',
          processing: 'blue',
          urgent: 'red',
          doing: 'gold',
        }[newNotice.status];
        newNotice.extra = (
          <Tag
            color={color}
            style={{
              marginRight: 0,
            }}
          >
            {newNotice.extra}
          </Tag>
        );
      }
      return newNotice;
    });
    return groupBy(newNotices, 'type');
  };

  getUnreadData = (noticeData: { [key: string]: NoticeItem[] }) => {
    const unreadMsg: {
      [key: string]: number;
    } = {};
    Object.keys(noticeData).forEach(key => {
      const value = noticeData[key];
      if (!unreadMsg[key]) {
        unreadMsg[key] = 0;
      }
      if (Array.isArray(value)) {
        unreadMsg[key] = value.filter(item => !item.read).length;
      }
    });
    return unreadMsg;
  };

  render() {
    const { currentUser, fetchingNotices, onNoticeVisibleChange } = this.props;
    const noticeData = this.getNoticeData();
    const unreadMsg = this.getUnreadData(noticeData);
    return (
      <NoticeIcon
        className={styles.action}
        count={currentUser && currentUser.unreadCount}
        onItemClick={item => {
          this.changeReadState(item as NoticeItem);
        }}
        loading={fetchingNotices}
        clearText="Clear"
        viewMoreText="View more"
        onClear={this.handleNoticeClear}
        onPopupVisibleChange={onNoticeVisibleChange}
        onViewMore={() => message.info('Click on view more')}
        clearClose
      >
        <NoticeIcon.Tab
          tabKey="notification"
          count={unreadMsg.notification}
          list={noticeData.notification}
          title="Notifications"
          emptyText="You have viewed all notifications"
          showViewMore
        />
        <NoticeIcon.Tab
          tabKey="message"
          count={unreadMsg.message}
          list={noticeData.message}
          title="Messages"
          emptyText="You have read all messages"
          showViewMore
        />
        <NoticeIcon.Tab
          tabKey="event"
          title="To-dos"
          emptyText="You have completed all to-dos"
          count={unreadMsg.event}
          list={noticeData.event}
          showViewMore
        />
      </NoticeIcon>
    );
  }
}

export default connect(({ user, global, loading }: ConnectState) => ({
  currentUser: user.currentUser,
  collapsed: global.collapsed,
  fetchingMoreNotices: loading.effects['global/fetchMoreNotices'],
  fetchingNotices: loading.effects['global/fetchNotices'],
  notices: global.notices,
}))(GlobalHeaderRight);
import { AccessLevelList } from "../shared/access-level"; import { PolicyStatement } from "../shared"; /** * Statement provider for service [honeycode](https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonhoneycode.html). * * @param sid [SID](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_sid.html) of the statement */ export class Honeycode extends PolicyStatement { public servicePrefix = 'honeycode'; /** * Statement provider for service [honeycode](https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonhoneycode.html). * * @param sid [SID](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_sid.html) of the statement */ constructor (sid?: string) { super(sid); } /** * Grants permission to approve a team association request for your AWS Account * * Access Level: Write * * https://docs.aws.amazon.com/honeycode/latest/UserGuide/team-association.html#approve-team-association */ public toApproveTeamAssociation() { return this.to('ApproveTeamAssociation'); } /** * Grants permission to create new rows in a table * * Access Level: Write * * https://docs.aws.amazon.com/honeycode/latest/UserGuide/API_BatchCreateTableRows.html */ public toBatchCreateTableRows() { return this.to('BatchCreateTableRows'); } /** * Grants permission to delete rows from a table * * Access Level: Write * * https://docs.aws.amazon.com/honeycode/latest/UserGuide/API_BatchDeleteTableRows.html */ public toBatchDeleteTableRows() { return this.to('BatchDeleteTableRows'); } /** * Grants permission to update rows in a table * * Access Level: Write * * https://docs.aws.amazon.com/honeycode/latest/UserGuide/API_BatchUpdateTableRows.html */ public toBatchUpdateTableRows() { return this.to('BatchUpdateTableRows'); } /** * Grants permission to upsert rows in a table * * Access Level: Write * * https://docs.aws.amazon.com/honeycode/latest/UserGuide/API_BatchUpsertTableRows.html */ public toBatchUpsertTableRows() { return this.to('BatchUpsertTableRows'); } /** * Grants permission to create a new Amazon Honeycode team for your AWS Account * * Access Level: Write * * https://docs.aws.amazon.com/honeycode/latest/UserGuide/team.html#create-team */ public toCreateTeam() { return this.to('CreateTeam'); } /** * Grants permission to create a new tenant within Amazon Honeycode for your AWS Account * * Access Level: Write * * https://docs.aws.amazon.com/honeycode/latest/UserGuide/tenant.html#create-tenant */ public toCreateTenant() { return this.to('CreateTenant'); } /** * Grants permission to remove groups from an Amazon Honeycode team for your AWS Account * * Access Level: Write * * https://docs.aws.amazon.com/honeycode/latest/UserGuide/group.html#deregister-groups */ public toDeregisterGroups() { return this.to('DeregisterGroups'); } /** * Grants permission to get details about a table data import job * * Access Level: Read * * https://docs.aws.amazon.com/honeycode/latest/UserGuide/API_DescribeTableDataImportJob.html */ public toDescribeTableDataImportJob() { return this.to('DescribeTableDataImportJob'); } /** * Grants permission to get details about Amazon Honeycode teams for your AWS Account * * Access Level: Read * * https://docs.aws.amazon.com/honeycode/latest/UserGuide/team.html#describe-team */ public toDescribeTeam() { return this.to('DescribeTeam'); } /** * Grants permission to load the data from a screen * * Access Level: Read * * https://docs.aws.amazon.com/honeycode/latest/UserGuide/API_GetScreenData.html */ public toGetScreenData() 
{ return this.to('GetScreenData'); } /** * Grants permission to invoke a screen automation * * Access Level: Write * * https://docs.aws.amazon.com/honeycode/latest/UserGuide/API_InvokeScreenAutomation.html */ public toInvokeScreenAutomation() { return this.to('InvokeScreenAutomation'); } /** * Grants permission to list all Amazon Honeycode domains and their verification status for your AWS Account * * Access Level: List * * https://docs.aws.amazon.com/honeycode/latest/UserGuide/domain.html#list-domains */ public toListDomains() { return this.to('ListDomains'); } /** * Grants permission to list all groups in an Amazon Honeycode team for your AWS Account * * Access Level: List * * https://docs.aws.amazon.com/honeycode/latest/UserGuide/group.html#list-groups */ public toListGroups() { return this.to('ListGroups'); } /** * Grants permission to list the columns in a table * * Access Level: List * * https://docs.aws.amazon.com/honeycode/latest/UserGuide/API_ListTableColumns.html */ public toListTableColumns() { return this.to('ListTableColumns'); } /** * Grants permission to list the rows in a table * * Access Level: List * * https://docs.aws.amazon.com/honeycode/latest/UserGuide/API_ListTableRows.html */ public toListTableRows() { return this.to('ListTableRows'); } /** * Grants permission to list the tables in a workbook * * Access Level: List * * https://docs.aws.amazon.com/honeycode/latest/UserGuide/API_ListTables.html */ public toListTables() { return this.to('ListTables'); } /** * Grants permission to list all pending and approved team associations with your AWS Account * * Access Level: List * * https://docs.aws.amazon.com/honeycode/latest/UserGuide/team-association.html#list-team-associations */ public toListTeamAssociations() { return this.to('ListTeamAssociations'); } /** * Grants permission to list all tenants of Amazon Honeycode for your AWS Account * * Access Level: List * * https://docs.aws.amazon.com/honeycode/latest/UserGuide/tenant.html#list-tenants */ public toListTenants() { return this.to('ListTenants'); } /** * Grants permission to query the rows of a table using a filter * * Access Level: Read * * https://docs.aws.amazon.com/honeycode/latest/UserGuide/API_QueryTableRows.html */ public toQueryTableRows() { return this.to('QueryTableRows'); } /** * Grants permission to request verification of the Amazon Honeycode domains for your AWS Account * * Access Level: Write * * https://docs.aws.amazon.com/honeycode/latest/UserGuide/domain.html#register-domain-for-verification */ public toRegisterDomainForVerification() { return this.to('RegisterDomainForVerification'); } /** * Grants permission to add groups to an Amazon Honeycode team for your AWS Account * * Access Level: Write * * https://docs.aws.amazon.com/honeycode/latest/UserGuide/group.html#register-groups */ public toRegisterGroups() { return this.to('RegisterGroups'); } /** * Grants permission to reject a team association request for your AWS Account * * Access Level: Write * * https://docs.aws.amazon.com/honeycode/latest/UserGuide/team-association.html#reject-team-association */ public toRejectTeamAssociation() { return this.to('RejectTeamAssociation'); } /** * Grants permission to restart verification of the Amazon Honeycode domains for your AWS Account * * Access Level: Write * * https://docs.aws.amazon.com/honeycode/latest/UserGuide/domain.html#restart-domain-verification */ public toRestartDomainVerification() { return this.to('RestartDomainVerification'); } /** * Grants permission to start a table data import job * * 
Access Level: Write * * https://docs.aws.amazon.com/honeycode/latest/UserGuide/API_StartTableDataImportJob.html */ public toStartTableDataImportJob() { return this.to('StartTableDataImportJob'); } /** * Grants permission to update an Amazon Honeycode team for your AWS Account * * Access Level: Write * * https://docs.aws.amazon.com/honeycode/latest/UserGuide/team.html#update-team */ public toUpdateTeam() { return this.to('UpdateTeam'); } protected accessLevelList: AccessLevelList = { "Write": [ "ApproveTeamAssociation", "BatchCreateTableRows", "BatchDeleteTableRows", "BatchUpdateTableRows", "BatchUpsertTableRows", "CreateTeam", "CreateTenant", "DeregisterGroups", "InvokeScreenAutomation", "RegisterDomainForVerification", "RegisterGroups", "RejectTeamAssociation", "RestartDomainVerification", "StartTableDataImportJob", "UpdateTeam" ], "Read": [ "DescribeTableDataImportJob", "DescribeTeam", "GetScreenData", "QueryTableRows" ], "List": [ "ListDomains", "ListGroups", "ListTableColumns", "ListTableRows", "ListTables", "ListTeamAssociations", "ListTenants" ] }; /** * Adds a resource of type workbook to the statement * * https://docs.aws.amazon.com/honeycode/latest/UserGuide/resource-workbook.html * * @param workbookId - Identifier for the workbookId. * @param account - Account of the resource; defaults to empty string: all accounts. * @param region - Region of the resource; defaults to empty string: all regions. * @param partition - Partition of the AWS account [aws, aws-cn, aws-us-gov]; defaults to `aws`. */ public onWorkbook(workbookId: string, account?: string, region?: string, partition?: string) { var arn = 'arn:${Partition}:honeycode:${Region}:${Account}:workbook:workbook/${WorkbookId}'; arn = arn.replace('${WorkbookId}', workbookId); arn = arn.replace('${Account}', account || '*'); arn = arn.replace('${Region}', region || '*'); arn = arn.replace('${Partition}', partition || 'aws'); return this.on(arn); } /** * Adds a resource of type table to the statement * * https://docs.aws.amazon.com/honeycode/latest/UserGuide/resource-table.html * * @param workbookId - Identifier for the workbookId. * @param tableId - Identifier for the tableId. * @param account - Account of the resource; defaults to empty string: all accounts. * @param region - Region of the resource; defaults to empty string: all regions. * @param partition - Partition of the AWS account [aws, aws-cn, aws-us-gov]; defaults to `aws`. */ public onTable(workbookId: string, tableId: string, account?: string, region?: string, partition?: string) { var arn = 'arn:${Partition}:honeycode:${Region}:${Account}:table:workbook/${WorkbookId}/table/${TableId}'; arn = arn.replace('${WorkbookId}', workbookId); arn = arn.replace('${TableId}', tableId); arn = arn.replace('${Account}', account || '*'); arn = arn.replace('${Region}', region || '*'); arn = arn.replace('${Partition}', partition || 'aws'); return this.on(arn); } /** * Adds a resource of type screen to the statement * * https://docs.aws.amazon.com/honeycode/latest/UserGuide/resource-screen.html * * @param workbookId - Identifier for the workbookId. * @param appId - Identifier for the appId. * @param screenId - Identifier for the screenId. * @param account - Account of the resource; defaults to empty string: all accounts. * @param region - Region of the resource; defaults to empty string: all regions. * @param partition - Partition of the AWS account [aws, aws-cn, aws-us-gov]; defaults to `aws`. 
*/ public onScreen(workbookId: string, appId: string, screenId: string, account?: string, region?: string, partition?: string) { var arn = 'arn:${Partition}:honeycode:${Region}:${Account}:screen:workbook/${WorkbookId}/app/${AppId}/screen/${ScreenId}'; arn = arn.replace('${WorkbookId}', workbookId); arn = arn.replace('${AppId}', appId); arn = arn.replace('${ScreenId}', screenId); arn = arn.replace('${Account}', account || '*'); arn = arn.replace('${Region}', region || '*'); arn = arn.replace('${Partition}', partition || 'aws'); return this.on(arn); } /** * Adds a resource of type screen-automation to the statement * * https://docs.aws.amazon.com/honeycode/latest/UserGuide/resource-screen-automation.html * * @param workbookId - Identifier for the workbookId. * @param appId - Identifier for the appId. * @param screenId - Identifier for the screenId. * @param automationId - Identifier for the automationId. * @param account - Account of the resource; defaults to empty string: all accounts. * @param region - Region of the resource; defaults to empty string: all regions. * @param partition - Partition of the AWS account [aws, aws-cn, aws-us-gov]; defaults to `aws`. */ public onScreenAutomation(workbookId: string, appId: string, screenId: string, automationId: string, account?: string, region?: string, partition?: string) { var arn = 'arn:${Partition}:honeycode:${Region}:${Account}:screen-automation:workbook/${WorkbookId}/app/${AppId}/screen/${ScreenId}/automation/${AutomationId}'; arn = arn.replace('${WorkbookId}', workbookId); arn = arn.replace('${AppId}', appId); arn = arn.replace('${ScreenId}', screenId); arn = arn.replace('${AutomationId}', automationId); arn = arn.replace('${Account}', account || '*'); arn = arn.replace('${Region}', region || '*'); arn = arn.replace('${Partition}', partition || 'aws'); return this.on(arn); } }
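For orientation, here is a hypothetical usage sketch of the class above. The fluent allow() helper and the toJSON() serializer are assumptions about the shared PolicyStatement base class (this file only defines the to*/on* methods), so treat this as an illustration under those assumptions, not the library's confirmed API.

// Hypothetical usage sketch; allow() and toJSON() are assumed helpers from
// the '../shared' PolicyStatement base class, not defined in this file.
import { Honeycode } from './honeycode';

const stmt = new Honeycode('AllowHoneycodeReads')
  .allow()                        // assumed: sets Effect: Allow on the statement
  .toListTables()                 // action defined above: honeycode:ListTables
  .toQueryTableRows()             // action defined above: honeycode:QueryTableRows
  .onTable('wb-123', 'tbl-456');  // resource helper defined above

console.log(JSON.stringify(stmt.toJSON(), null, 2)); // assumed serializer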
/* * Copyright (c) 2015 Nokia Solutions and Networks. All rights reserved. */ package com.nsn.ood.cls.core.test; import java.util.List; import com.nsn.ood.cls.core.model.ClientTag; import com.nsn.ood.cls.core.model.ClientWithTag; import com.nsn.ood.cls.core.model.FeaturesWithTag; import com.nsn.ood.cls.model.gen.clients.Client; import com.nsn.ood.cls.model.gen.features.Feature; /** * @author marynows * */ public class ObjectWithTagTestUtil { public static ClientWithTag clientWithTag(final Client client, final ClientTag clientTag) { return new ClientWithTag().withObject(client).withClientTag(clientTag); } public static FeaturesWithTag featuresWithTag(final List<Feature> features, final ClientTag clientTag) { return new FeaturesWithTag().withObject(features).withClientTag(clientTag); } }
import { HttpEndpoint } from './HttpEndpoint';

export type HttpOptionsParams = {
};

export function HttpOptions(params: HttpOptionsParams): MethodDecorator {
    // Forward params alongside the fixed method so future fields added to
    // HttpOptionsParams are not silently dropped (today the type is empty,
    // so this is behavior-identical to ignoring them).
    return HttpEndpoint({ ...params, methods: 'OPTIONS' });
}
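A minimal usage sketch, assuming the decorator is applied to a controller method and that HttpEndpoint wires the method into whatever routing layer this project uses; the controller class below is invented for illustration.

// Hypothetical controller; only HttpOptions/HttpEndpoint come from this project.
class WidgetController {
  @HttpOptions({})
  preflight(): void {
    // Invoked for OPTIONS requests once HttpEndpoint registers this method.
  }
}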
#include "gtest/gtest.h" #include "Builder.h" #include "Tesselate.h" TEST(BuilderTest, Ellipse1d) { mObject origin; origin["x"] = 0.0; origin["y"] = 0.0; origin["z"] = 0.0; mObject parameters; parameters["r1"] = 10.0; parameters["r2"] = 5.0; mObject json; json["origin"] = origin; json["parameters"] = parameters; Ellipse1DBuilder builder(json); TopoDS_Shape shape = builder.shape(); ASSERT_FALSE(shape.IsNull()); auto_ptr<Tesselator> tesselator(new Tesselator(shape)); mValue tesselation = tesselator->Tesselate(); mArray facesTesselation = tesselation.get_obj()["faces"].get_obj()["positions"].get_array(); mArray edgeTesselation = tesselation.get_obj()["edges"].get_obj()["positions"].get_array(); ASSERT_EQ(facesTesselation.size(), 0); ASSERT_NE(edgeTesselation.size(), 0); } TEST(BuilderTest, Bezier1d) { mObject origin; origin["x"] = 0.0; origin["y"] = 0.0; origin["z"] = 0.0; mObject parameters; mObject v0; v0["u"] = 0.0; v0["v"] = 0.0; v0["w"] = 0.0; mObject v1; v1["u"] = 10.0; v1["v"] = 0.0; v1["w"] = 0.0; mObject v2; v2["u"] = 0.0; v2["v"] = 10.0; v2["w"] = 0.0; mObject v3; v3["u"] = 10.0; v3["v"] = 10.0; v3["w"] = 0.0; mArray vertices; vertices.push_back(v0); vertices.push_back(v1); vertices.push_back(v2); vertices.push_back(v3); parameters["vertices"] = vertices; mObject json; json["origin"] = origin; json["parameters"] = parameters; Bezier1DBuilder builder(json); TopoDS_Shape shape = builder.shape(); ASSERT_FALSE(shape.IsNull()); auto_ptr<Tesselator> tesselator(new Tesselator(shape)); mValue tesselation = tesselator->Tesselate(); mArray facesTesselation = tesselation.get_obj()["faces"].get_obj()["positions"].get_array(); mArray edgeTesselation = tesselation.get_obj()["edges"].get_obj()["positions"].get_array(); ASSERT_EQ(facesTesselation.size(), 0); ASSERT_NE(edgeTesselation.size(), 0); } TEST(BuilderTest, Ellipse2d) { mObject origin; origin["x"] = 0.0; origin["y"] = 0.0; origin["z"] = 0.0; mObject parameters; parameters["r1"] = 10.0; parameters["r2"] = 5.0; mObject json; json["origin"] = origin; json["parameters"] = parameters; Ellipse2DBuilder builder(json); TopoDS_Shape shape = builder.shape(); ASSERT_FALSE(shape.IsNull()); auto_ptr<Tesselator> tesselator(new Tesselator(shape)); mValue tesselation = tesselator->Tesselate(); mArray facesTesselation = tesselation.get_obj()["faces"].get_obj()["positions"].get_array(); mArray edgeTesselation = tesselation.get_obj()["edges"].get_obj()["positions"].get_array(); ASSERT_NE(facesTesselation.size(), 0); ASSERT_NE(edgeTesselation.size(), 0); } TEST(BuilderTest, Rectangle2d) { mObject origin; origin["x"] = 0.0; origin["y"] = 0.0; origin["z"] = 0.0; mObject parameters; parameters["u"] = 10.0; parameters["v"] = 5.0; mObject json; json["origin"] = origin; json["parameters"] = parameters; Rectangle2DBuilder builder(json); TopoDS_Shape shape = builder.shape(); ASSERT_FALSE(shape.IsNull()); auto_ptr<Tesselator> tesselator(new Tesselator(shape)); mValue tesselation = tesselator->Tesselate(); mArray facesTesselation = tesselation.get_obj()["faces"].get_obj()["positions"].get_array(); mArray edgeTesselation = tesselation.get_obj()["edges"].get_obj()["positions"].get_array(); ASSERT_NE(facesTesselation.size(), 0); ASSERT_NE(edgeTesselation.size(), 0); } TEST(BuilderTest, EllipseWithMajorSmallerThanMinor) { mObject origin; origin["x"] = 0.0; origin["y"] = 0.0; origin["z"] = 0.0; mObject parameters; parameters["r1"] = 5.0; parameters["r2"] = 10.0; parameters["from_angle"] = 30.0; parameters["to_angle"] = 45.0; mObject json; json["origin"] = origin; 
json["parameters"] = parameters; Ellipse2DBuilder builder(json); TopoDS_Shape shape = builder.shape(); ASSERT_FALSE(shape.IsNull()); auto_ptr<Tesselator> tesselator(new Tesselator(shape)); mValue tesselation = tesselator->Tesselate(); mArray facesTesselation = tesselation.get_obj()["faces"].get_obj()["positions"].get_array(); mArray edgeTesselation = tesselation.get_obj()["edges"].get_obj()["positions"].get_array(); ASSERT_NE(facesTesselation.size(), 0); ASSERT_NE(edgeTesselation.size(), 0); } TEST(BuilderTest, Cone1) { mObject origin; origin["x"] = 0.0; origin["y"] = 0.0; origin["z"] = 0.0; mObject parameters; parameters["r1"] = 10.0; parameters["h"] = 10.0; parameters["r2"] = 5.0; mObject json; json["origin"] = origin; json["parameters"] = parameters; ConeBuilder builder(json); TopoDS_Shape shape = builder.shape(); ASSERT_FALSE(shape.IsNull()); auto_ptr<Tesselator> tesselator(new Tesselator(shape)); mValue tesselation = tesselator->Tesselate(); mArray facesTesselation = tesselation.get_obj()["faces"].get_obj()["positions"].get_array(); mArray edgeTesselation = tesselation.get_obj()["edges"].get_obj()["positions"].get_array(); ASSERT_NE(facesTesselation.size(), 0); ASSERT_NE(edgeTesselation.size(), 0); } TEST(BuilderTest, Prism) { mObject origin; origin["x"] = 0.0; origin["y"] = 0.0; origin["z"] = 0.0; mObject ellipseParameters; ellipseParameters["r1"] = 10.0; ellipseParameters["r2"] = 5.0; mObject ellipseJson; ellipseJson["origin"] = origin; ellipseJson["parameters"] = ellipseParameters; Ellipse2DBuilder ellipse2Dbuilder(ellipseJson); TopoDS_Shape ellipse2d = ellipse2Dbuilder.shape(); ASSERT_FALSE(ellipse2d.IsNull()); mObject prismJson; mObject prismParameters; prismParameters["u"] = 0.0; prismParameters["v"] = 0.0; prismParameters["w"] = 5.0; prismJson["origin"] = origin; prismJson["parameters"] = prismParameters; PrismBuilder prismBuilder(prismJson, ellipse2d); TopoDS_Shape prism = prismBuilder.shape(); ASSERT_FALSE(prism.IsNull()); } TEST(BuilderTest, Revolve) { mObject origin; origin["x"] = 0.0; origin["y"] = 0.0; origin["z"] = 0.0; mObject ellipseParameters; ellipseParameters["r1"] = 10.0; ellipseParameters["r2"] = 5.0; mObject ellipseJson; ellipseJson["origin"] = origin; ellipseJson["parameters"] = ellipseParameters; Ellipse2DBuilder ellipse2Dbuilder(ellipseJson); TopoDS_Shape ellipse2d = ellipse2Dbuilder.shape(); ASSERT_FALSE(ellipse2d.IsNull()); mObject revolveJson; mObject revolveParameters; revolveParameters["u"] = 0.0; revolveParameters["v"] = 0.0; revolveParameters["w"] = 5.0; revolveParameters["angle"] = M_PI/2; revolveJson["origin"] = origin; revolveJson["parameters"] = revolveParameters; RevolveBuilder revolveBuilder(revolveJson, ellipse2d); TopoDS_Shape revolve = revolveBuilder.shape(); ASSERT_FALSE(revolve.IsNull()); } TEST(BuilderTest, MakeSolidFrom1DRevolve) { mObject origin; origin["x"] = 0.0; origin["y"] = 0.0; origin["z"] = 0.0; mObject ellipseParameters; ellipseParameters["r1"] = 10.0; ellipseParameters["r2"] = 5.0; ellipseParameters["from_angle"] = 0; ellipseParameters["to_angle"] = 180; mObject ellipseJson; ellipseJson["origin"] = origin; ellipseJson["parameters"] = ellipseParameters; Ellipse1DBuilder ellipse1Dbuilder(ellipseJson); TopoDS_Shape ellipse1d = ellipse1Dbuilder.shape(); ASSERT_FALSE(ellipse1d.IsNull()); mObject revolveJson; mObject revolveParameters; revolveParameters["u"] = 1.0; revolveParameters["v"] = 0.0; revolveParameters["w"] = 0.0; revolveParameters["angle"] = M_PI*2; revolveJson["origin"] = origin; revolveJson["parameters"] = 
revolveParameters; RevolveBuilder revolveBuilder(revolveJson, ellipse1d); TopoDS_Shape revolve = revolveBuilder.shape(); ASSERT_FALSE(revolve.IsNull()); ASSERT_FALSE(TopExp_Explorer(revolve, TopAbs_SOLID).More()); mObject empty; vector<TopoDS_Shape> group1; group1.push_back(revolve); SolidBuilder makeSolid(empty, group1); ASSERT_FALSE(makeSolid.shape().IsNull()); ASSERT_TRUE(TopExp_Explorer(makeSolid.shape(), TopAbs_SOLID).More()); BRepClass3d_SolidClassifier classifier(makeSolid.shape()); classifier.PerformInfinitePoint(0.1); ASSERT_FALSE(classifier.State() == TopAbs_IN); } TEST(BuilderTest, Boolean1D) { mObject origin; origin["x"] = 0.0; origin["y"] = 0.0; origin["z"] = 0.0; mObject ellipse1Parameters; ellipse1Parameters["r1"] = 10.0; ellipse1Parameters["r2"] = 10.0; mObject ellipse2Parameters; ellipse2Parameters["r1"] = 20.0; ellipse2Parameters["r2"] = 10.0; mObject ellipse1Json; ellipse1Json["origin"] = origin; ellipse1Json["parameters"] = ellipse1Parameters; mObject ellipse2Json; ellipse2Json["origin"] = origin; ellipse2Json["parameters"] = ellipse2Parameters; Ellipse1DBuilder ellipse1Builder(ellipse1Json); Ellipse1DBuilder ellipse2Builder(ellipse2Json); mObject unionJson; vector<TopoDS_Shape> group1; group1.push_back(ellipse1Builder.shape()); group1.push_back(ellipse2Builder.shape()); SubtractBuilder union1(unionJson, group1); ASSERT_FALSE(union1.shape().IsNull()); } TEST(BuilderTest, MakeFaceFromWires) { mObject origin; origin["x"] = 0.0; origin["y"] = 0.0; origin["z"] = 0.0; mObject ellipse1Parameters; ellipse1Parameters["r1"] = 10.0; ellipse1Parameters["r2"] = 10.0; ellipse1Parameters["from_angle"] = 0.0; ellipse1Parameters["to_angle"] = 180.0; mObject ellipse2Parameters; ellipse2Parameters["r1"] = 10.0; ellipse2Parameters["r2"] = 20.0; ellipse2Parameters["from_angle"] = 0.0; ellipse2Parameters["to_angle"] = 180.0; mObject ellipse1Json; ellipse1Json["origin"] = origin; ellipse1Json["parameters"] = ellipse1Parameters; mObject ellipse2Json; ellipse2Json["origin"] = origin; ellipse2Json["parameters"] = ellipse2Parameters; Ellipse1DBuilder ellipse1Builder(ellipse1Json); Ellipse1DBuilder ellipse2Builder(ellipse2Json); mObject empty; vector<TopoDS_Shape> group1; group1.push_back(ellipse1Builder.shape()); group1.push_back(ellipse2Builder.shape()); FaceBuilder makeFace(empty, group1); ASSERT_FALSE(makeFace.shape().IsNull()); } TEST(BuilderTest, Polyline) { mObject origin; origin["x"] = 0.0; origin["y"] = 0.0; origin["z"] = 0.0; mObject polylineParameters; mObject v0; v0["u"] = 0.0; v0["v"] = 0.0; v0["w"] = 0.0; mObject v1; v1["u"] = 2.0; v1["v"] = 2.0; v1["w"] = 0.0; mArray vertices1; vertices1.push_back(v0); vertices1.push_back(v1); polylineParameters["vertices"] = vertices1; mObject polylineJson; polylineJson["origin"] = origin; polylineJson["parameters"] = polylineParameters; PolylineBuilder polylineBuilder(polylineJson); ASSERT_FALSE(polylineBuilder.shape().IsNull()); auto_ptr<Tesselator> tesselator(new Tesselator(polylineBuilder.shape())); mValue tesselation = tesselator->Tesselate(); mArray facesTesselation = tesselation.get_obj()["faces"].get_obj()["positions"].get_array(); mArray edgeTesselation = tesselation.get_obj()["edges"].get_obj()["positions"].get_array(); ASSERT_EQ(facesTesselation.size(), 0); ASSERT_NE(edgeTesselation.size(), 0); } TEST(BuilderTest, MakeFaceFromWiresRespectOrientation) { mObject origin; origin["x"] = 0.0; origin["y"] = 0.0; origin["z"] = 0.0; mObject ellipse1Parameters; ellipse1Parameters["r1"] = 10.0; ellipse1Parameters["r2"] = 10.0; 
ellipse1Parameters["from_angle"] = 0.0; ellipse1Parameters["to_angle"] = 90.0; mObject ellipse1Json; ellipse1Json["origin"] = origin; ellipse1Json["parameters"] = ellipse1Parameters; mObject bezier1Parameters; mObject v0; v0["u"] = 0.0; v0["v"] = 0.0; v0["w"] = 0.0; mObject v1; v1["u"] = 2.0; v1["v"] = 2.0; v1["w"] = 0.0; mObject v2; v2["u"] = 2.0; v2["v"] = 2.0; v2["w"] = 0.0; mObject v3; v3["u"] = 10.0; v3["v"] = 0.0; v3["w"] = 0.0; mObject v3_2; v3_2["u"] = 0.0; v3_2["v"] = 10.0; v3_2["w"] = 0.0; mArray vertices1; vertices1.push_back(v0); vertices1.push_back(v1); vertices1.push_back(v2); vertices1.push_back(v3); bezier1Parameters["vertices"] = vertices1; mObject bezier2Parameters; mArray vertices2; vertices2.push_back(v0); vertices2.push_back(v1); vertices2.push_back(v2); vertices2.push_back(v3_2); bezier2Parameters["vertices"] = vertices2; mObject bezier1json; bezier1json["origin"] = origin; bezier1json["parameters"] = bezier1Parameters; mObject bezier2json; bezier2json["origin"] = origin; bezier2json["parameters"] = bezier2Parameters; Ellipse1DBuilder ellipse1Builder(ellipse1Json); Bezier1DBuilder bezier1Builder(bezier1json); Bezier1DBuilder bezier2Builder(bezier2json); mObject empty; vector<TopoDS_Shape> group1; group1.push_back(ellipse1Builder.shape()); group1.push_back(bezier1Builder.shape()); group1.push_back(bezier2Builder.shape()); FaceBuilder makeFace(empty, group1); ASSERT_FALSE(makeFace.shape().IsNull()); } TEST(BuilderTest, MakeFaceFromWiresNotALoop) { mObject origin; origin["x"] = 0.0; origin["y"] = 0.0; origin["z"] = 0.0; mObject ellipse1Parameters; ellipse1Parameters["r1"] = 10.0; ellipse1Parameters["r2"] = 10.0; ellipse1Parameters["from_angle"] = 0.0; ellipse1Parameters["to_angle"] = 180.0; mObject ellipse2Parameters; ellipse2Parameters["r1"] = 50.0; ellipse2Parameters["r2"] = 50.0; ellipse2Parameters["from_angle"] = 0.0; ellipse2Parameters["to_angle"] = 180.0; mObject ellipse1Json; ellipse1Json["origin"] = origin; ellipse1Json["parameters"] = ellipse1Parameters; mObject ellipse2Json; ellipse2Json["origin"] = origin; ellipse2Json["parameters"] = ellipse2Parameters; Ellipse1DBuilder ellipse1Builder(ellipse1Json); Ellipse1DBuilder ellipse2Builder(ellipse2Json); mObject emptyJson; vector<TopoDS_Shape> group1; group1.push_back(ellipse1Builder.shape()); group1.push_back(ellipse2Builder.shape()); try { FaceBuilder makeFace(emptyJson, group1); ASSERT_TRUE(false); } catch (wires_not_a_loop& e) { ASSERT_STREQ("Wires are not a loop", e.what()); } } TEST(BuilderTest, Loft) { mObject origin1; origin1["x"] = 0.0; origin1["y"] = 0.0; origin1["z"] = 0.0; mObject origin2; origin2["x"] = 0.0; origin2["y"] = 0.0; origin2["z"] = 10.0; mObject ellipse1Parameters; ellipse1Parameters["r1"] = 10.0; ellipse1Parameters["r2"] = 10.0; ellipse1Parameters["from_angle"] = 0.0; ellipse1Parameters["to_angle"] = 180.0; mObject ellipse2Parameters; ellipse2Parameters["r1"] = 50.0; ellipse2Parameters["r2"] = 50.0; ellipse2Parameters["from_angle"] = 0.0; ellipse2Parameters["to_angle"] = 180.0; mObject ellipse1Json; ellipse1Json["origin"] = origin1; ellipse1Json["parameters"] = ellipse1Parameters; mObject ellipse2Json; ellipse2Json["origin"] = origin2; ellipse2Json["parameters"] = ellipse2Parameters; Ellipse2DBuilder ellipse1Builder(ellipse1Json); Ellipse2DBuilder ellipse2Builder(ellipse2Json); mObject loftJson; vector<TopoDS_Shape> group1; group1.push_back(ellipse1Builder.shape()); group1.push_back(ellipse2Builder.shape()); mObject empty; LoftBuilder loft(loftJson, group1); 
ASSERT_FALSE(loft.shape().IsNull()); } TEST(BuilderTest, Workplane) { mObject origin; origin["x"] = 0.0; origin["y"] = 0.0; origin["z"] = 0.0; mObject cuboidParameters; cuboidParameters["u"] = 10.0; cuboidParameters["v"] = 10.0; cuboidParameters["w"] = 10.0; mObject workplaneOrigin; workplaneOrigin["x"] = 0.0; workplaneOrigin["y"] = 0.0; workplaneOrigin["z"] = 10.0; mObject workplaneAxis; workplaneAxis["x"] = 0.0; workplaneAxis["y"] = 0.0; workplaneAxis["z"] = 1.0; mObject workplane; workplane["origin"] = workplaneOrigin; workplane["axis"] = workplaneAxis; workplane["angle"] = 45.0; mObject cuboidJson; cuboidJson["origin"] = origin; cuboidJson["parameters"] = cuboidParameters; cuboidJson["workplane"] = workplane; CuboidBuilder cuboidBuilder(cuboidJson); ASSERT_FALSE(cuboidBuilder.shape().IsNull()); } TEST(BuilderTest, Fillet) { mObject origin; origin["x"] = 0.0; origin["y"] = 0.0; origin["z"] = 0.0; mObject cuboidParameters; cuboidParameters["u"] = 10.0; cuboidParameters["v"] = 10.0; cuboidParameters["w"] = 10.0; mObject cuboidJson; cuboidJson["origin"] = origin; cuboidJson["parameters"] = cuboidParameters; CuboidBuilder cuboidBuilder(cuboidJson); ASSERT_FALSE(cuboidBuilder.shape().IsNull()); mObject filletJSON; mObject filletParameters; filletParameters["r"] = 1.0; filletJSON["parameters"] = filletParameters; FilletBuilder filletBuilder(filletJSON, cuboidBuilder.shape()); ASSERT_FALSE(filletBuilder.shape().IsNull()); } TEST(BuilderTest, Boolean) { mObject origin; origin["x"] = 0.0; origin["y"] = 0.0; origin["z"] = 0.0; mObject cuboidParameters; cuboidParameters["u"] = 10.0; cuboidParameters["v"] = 10.0; cuboidParameters["w"] = 5.0; mObject cuboidJson; cuboidJson["origin"] = origin; cuboidJson["parameters"] = cuboidParameters; CuboidBuilder cuboidBuilder(cuboidJson); mObject ellipseParameters; ellipseParameters["r1"] = 10.0; ellipseParameters["r2"] = 5.0; mObject ellipseJson; ellipseJson["origin"] = origin; ellipseJson["parameters"] = ellipseParameters; Ellipse2DBuilder ellipseBuilder(ellipseJson); mObject subtractJson; vector<TopoDS_Shape> group1; group1.push_back(cuboidBuilder.shape()); group1.push_back(ellipseBuilder.shape()); SubtractBuilder subtract1(subtractJson, group1); ASSERT_FALSE(subtract1.shape().IsNull()); vector<TopoDS_Shape> group2; group2.push_back(ellipseBuilder.shape()); group2.push_back(cuboidBuilder.shape()); SubtractBuilder subtract2(subtractJson, group2); ASSERT_FALSE(subtract2.shape().IsNull()); } TEST(BuilderTest, Boolean2) { mObject origin1, origin2; origin1["x"] = 0.0; origin1["y"] = 0.0; origin1["z"] = 0.0; origin2["x"] = 10.0; origin2["y"] = 0.0; origin2["z"] = 0.0; mObject ellipseParameters; ellipseParameters["r1"] = 10.0; ellipseParameters["r2"] = 5.0; mObject ellipseJson1; ellipseJson1["origin"] = origin1; ellipseJson1["parameters"] = ellipseParameters; mObject ellipseJson2; ellipseJson2["origin"] = origin2; ellipseJson2["parameters"] = ellipseParameters; Ellipse2DBuilder ellipseBuilder1(ellipseJson1); Ellipse2DBuilder ellipseBuilder2(ellipseJson2); TopoDS_Builder builderA; TopoDS_Compound compoundA; builderA.MakeCompound(compoundA); builderA.Add(compoundA, ellipseBuilder1.shape()); TopoDS_Builder builderB; TopoDS_Compound compoundB; builderB.MakeCompound(compoundB); builderB.Add(compoundB, ellipseBuilder2.shape()); TopoDS_Shape fuse = BRepAlgoAPI_Fuse(compoundA, compoundB); int numFaces = 0; for (TopExp_Explorer faces(fuse, TopAbs_FACE); faces.More(); faces.Next()) { ++numFaces; } ASSERT_EQ(3, numFaces); } TEST(BuilderTest, Boolean3) { mObject originA, 
originB, originC, originD; originA["x"] = 0.0; originA["y"] = 0.0; originA["z"] = 0.0; originB["x"] = 0.0; originB["y"] = 15.0; originB["z"] = 0.0; originC["x"] = 0.0; originC["y"] = 5.0; originC["z"] = 0.0; originD["x"] = 0.0; originD["y"] = 20.0; originD["z"] = 0.0; mObject ellipseParameters; ellipseParameters["r1"] = 20.0; ellipseParameters["r2"] = 10.0; mObject ellipseAJson; ellipseAJson["origin"] = originA; ellipseAJson["parameters"] = ellipseParameters; mObject ellipseBJson; ellipseBJson["origin"] = originB; ellipseBJson["parameters"] = ellipseParameters; mObject ellipseCJson; ellipseCJson["origin"] = originC; ellipseCJson["parameters"] = ellipseParameters; mObject ellipseDJson; ellipseDJson["origin"] = originD; ellipseDJson["parameters"] = ellipseParameters; Ellipse2DBuilder ellipseBuilderA(ellipseAJson); Ellipse2DBuilder ellipseBuilderB(ellipseBJson); Ellipse2DBuilder ellipseBuilderC(ellipseCJson); Ellipse2DBuilder ellipseBuilderD(ellipseDJson); vector<TopoDS_Shape> group1; group1.push_back(ellipseBuilderA.shape()); group1.push_back(ellipseBuilderB.shape()); vector<TopoDS_Shape> group2; group2.push_back(ellipseBuilderC.shape()); group2.push_back(ellipseBuilderD.shape()); mObject subtractJson; SubtractBuilder subtract1(subtractJson, group1); SubtractBuilder subtract2(subtractJson, group2); ASSERT_EQ(TopAbs_COMPOUND, subtract1.shape().ShapeType()); mObject prismOrigin; prismOrigin["x"] = 0.0; prismOrigin["y"] = 0.0; prismOrigin["z"] = 0.0; mObject prismParameters; prismParameters["u"] = 0.0; prismParameters["v"] = 0.0; prismParameters["w"] = 10.0; mObject prismJson; prismJson["origin"] = prismOrigin; prismJson["parameters"] = prismParameters; PrismBuilder prism1(prismJson, subtract1.shape()); PrismBuilder prism2(prismJson, subtract2.shape()); ASSERT_FALSE(prism1.shape().IsNull()); ASSERT_FALSE(prism2.shape().IsNull()); BRepAlgoAPI_Fuse(prism1.shape(), prism2.shape()); vector<TopoDS_Shape> finalGroup; finalGroup.push_back(prism1.shape()); finalGroup.push_back(prism2.shape()); IntersectBuilder final(subtractJson, finalGroup); ASSERT_FALSE(final.shape().IsNull()); } TEST(BuilderTest, Text) { mObject origin; origin["x"] = 0.0; origin["y"] = 0.0; origin["z"] = 0.0; mObject parameters; parameters["text"] = "12 3"; parameters["font"] = "OpenSans"; mObject json; json["origin"] = origin; json["parameters"] = parameters; Text2DBuilder builder(json); TopoDS_Shape shape = builder.shape(); ASSERT_FALSE(shape.IsNull()); }
Museum Scholarly Communications and Copyright Law: A Call for Balanced and Nuanced Exceptions Premised on Museum Mission and Mandate

Abstract
The purpose of this paper is to describe both the process and the policy reasons for ICOM's participation at the World Intellectual Property Organization's Intergovernmental Committee Meetings on Copyright and Related Rights, which seek exceptions to copyright law for libraries, archives and museums. Over the past three years, ICOM has played a substantial role as a non-governmental organisation, providing both leadership and strategy while the member states of the World Intellectual Property Organization grapple with the thorny issue of exceptions to copyright law. Over the last decade, museums have made a remarkable transition in creating their own online presence, with a profound impact on their scholarly communications. Whether to inform museum scholars of interesting curatorial developments, shed light on new collections or communicate in new ways with visitors and audiences through various social media platforms, museums have eagerly amplified their voices and expanded their spaces through new communications technologies. At the same time, digitisation efforts are maturing and are poised to increase rapidly: new technologies and applications allow deep search capability and rapid, cost-effective mass digitisation. In response, there has been an ongoing call for a streamlined approach to copyright law that gives museums, libraries and archives greater certainty that the digital uses of the works in their collections are, in fact, lawful. The challenge, however, rests in finding a balanced approach that does not harm existing markets for the works of the authors and artists represented in museum collections.
package behavioural.strategy;

public interface Strategy {
    Integer doOperation(int a, int b);
}
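To show the pattern in motion, here is a sketch of one concrete strategy plus a context class; both are illustrative companions, not part of this package's actual sources.

// Illustrative companions to the Strategy interface above.
class AddStrategy implements Strategy {
    @Override
    public Integer doOperation(int a, int b) {
        return a + b; // one interchangeable algorithm
    }
}

class Calculator {
    private final Strategy strategy;

    Calculator(Strategy strategy) {
        this.strategy = strategy; // the algorithm is injected, not hard-coded
    }

    int run(int a, int b) {
        return strategy.doOperation(a, b);
    }
}

// new Calculator(new AddStrategy()).run(2, 3) returns 5.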
Intertester reliability and concurrent validity of fluid-based and universal goniometers for active knee flexion. This study was designed 1) to determine the intertester reliability of the universal goniometer and the fluid-based goniometer and 2) to establish the concurrent validity of the fluid-based goniometer. Two testers measured active knee flexion of 20 healthy subjects (15 women, 5 men) using both instruments. The subjects had a mean age of 24.8 years (s = 5.5) and reported no previous history of pathological conditions of the right lower extremity. Using a correlational design, high intertester reliability was established for each instrument (universal goniometer, r = .87; fluid-based goniometer, r = .83). The concurrent validity of the fluid-based goniometer was also good for both testers (tester A, r = .83; tester B, r = .82). When the data were subjected to t tests, significant differences were found between the instruments for each tester (p < .05). The results suggest that similar measurements will be obtained between therapists using the universal and fluid-based goniometers; however, the two instruments cannot be used interchangeably in the clinic.
answers in genesis tackles logic, fails miserably

Answers In Genesis tries to debate the divine origins of logic as if it was a law of physics, with the most illogical drivel they could muster.

When you live in constant denial, one of the benefits of your avoidance of the real world is getting to make up your own rules of physics, chemistry, biology and history, and then pretending as if they actually apply outside of your imagination, especially when you sell them as your religious beliefs. Just ask Ken Ham and his band of professional ignoramuses. After publishing countless articles that are wrong about virtually every aspect of middle school science and opening a museum which serves as a testament of rabid religious denialism, they decided that if you believe that The Flintstones was an accurate account of human pre-history, you could tackle logical constructs to prove that those uppity atheists are the irrational ones rather than you. Really, at this point it’s actually kind of sad because the whole thing reads like it was written by a sixth grader…

The materialistic atheist can’t have laws of logic. He believes that everything that exists is material — part of the physical world. But laws of logic are not physical. You can’t stub your toe on a law of logic. Laws of logic cannot exist in the atheist’s world, yet he uses them to try to reason. … He is borrowing from the Christian worldview to argue against the Christian worldview. The atheist’s view cannot be rational because he uses things that cannot exist according to his profession.

Basically, the argument here is that since materialists believe everything is a part of a natural world and laws of logic aren’t tangible, therefore Christianity according to them is right and there’s a magical creature running the 6,000 year old universe to which you can telepathically talk at your leisure. Wow, gee, isn’t Jason Lisle, the AiG astrophysicist who sacrificed his reason to Ham’s inane war on science, so logical? Too bad he can’t distinguish between abstract concepts and physical entities, basing his entire argument on that inability, but hey, he says he can prove it by confusing the very measurable and well-defined physical laws of causality with the notion of a god’s supernatural consistency. Oh, and by borrowing an example from Sunday School for first and second graders, which you’ll probably be able to recognize pretty quickly.

Breathing requires air, not a profession of belief in air. Likewise, logical reasoning requires God, not a profession of belief in Him. Of course the atheist can reason; it’s because God has made his mind and given him access to the laws of logic — and that’s the point. It’s because God exists that reasoning is possible. The atheist can reason, but within his own worldview he cannot account for his ability to reason.

Umm… ok? Note to the AiG’s zombies: asserting things doesn’t mean you logically proved them. We can see air, we can breathe it, we can trap it, measure it, analyze it, and create it in the lab. If you’re going to compare a deity to air, you better make sure we can do the same with the deity, otherwise you’re comparing apples to the third moon of Omicron Persei 8 and pretending that your lapse in thought is somehow logical. Even worse is the ridiculous insistence that just because we exist, it must mean your God exists too.
We can reason thanks to millions of years of evolution which gave us the rudiments of intelligence, a real sense of right and wrong straight from the cradle, and helped spawn societies with their legal codices. But wouldn’t you know it, the brilliant logician that is Lisle saw through the ruse and can counter with the following…

The atheist might respond, “Laws of logic are material — they’re electro-chemical connections in the brain.” But then the laws of logic are not universal; they would not extend beyond the brain. In other words, we couldn’t argue that contradictions cannot occur on Mars, since no one’s brain is on Mars. In fact, if the laws of logic are just electro-chemical connections in the brain, then they would differ somewhat from person to person because everyone has different connections in their brain.

Ow. The stupid. It burns like the death ray spawned by a feeding black hole’s surging magnetic fields, as Orac would probably say. While trying to prove that the laws of logic are invariable throughout the universe, Lisle is so completely unaware of what he’s saying, he doesn’t even notice that he proves himself wrong. Obviously, a mix of fundamentalism and denial gave him very different ideas of what the laws of logic are for most people, since he’s trying to argue that his ability to think the way a rabid Biblical literalist does is incontrovertible proof of a deity. And on top of that, just to make this even worse and more embarrassing on himself, he goes on to compare the uniform laws of physics on a macro scale to his personal interpretations of Ham’s screeds. So he thinks that he logically proved that because gravity doesn’t reverse itself on Mars, he was granted supreme logical skills from the creator of the universe. Unless he prefixed all this with the word “Christianity,” most of us would probably think that he should spend a few weeks in a padded room for observation.

Is this what passes for logic in AiG’s collective lapse of thought? This is like being lectured about genetics by an obnoxious six year old who thinks he knows so much about the subject that he can condescend to you as he spews out utter nonsense. And if it wasn’t grown men doing the same thing and insisting on propagating this insipidity among others under a false guise of scientific authority, it would actually be kind of funny. But in reality, this exercise in “logic” is just the ramblings of a zealot desperately grasping at straws and passing off mindless repetition and assertions as an argument for a magical sky man who listens to his thoughts if he thinks hard enough. Really, what deity would even want to have such followers?
print("Cal count bc you're fat") gfat = float(input("Grams of Fat: ")) gprotein = float(input("Grams of Protein: ")) gcarbohydrate = float(input("Grams of Carbohydrates: ")) # Calorie calc ONEG_FAT = 9 ONEG_PROTEIN = 4 ONEG_CARBOHYD = 4 fatCal = gfat * ONEG_FAT print ("Grams of Fat: " +str(fatCal)) proteinCal = gprotein * ONEG_PROTEIN print ("Grams of Protein: " + str(proteinCal)) carbohydCal = gcarbohydrate * ONEG_CARBOHYD print ("Grams of Carbohydrates: " + str(carbohydCal)) totalCal = fatCal + proteinCal + carbohydCal print ("Total Calories: " + str(totalCal))
import re import pytest from textacy import extract @pytest.fixture(scope="module") def sss_doc(lang_en): text = ( "In general, Burton DeWilde loves animals, but he does not love *all* animals. " "Burton loves his cats Rico and Isaac; Burton loved his cat Lucy. " "But Burton DeWilde definitely does not love snakes, spiders, or moths. " "Now you know that Burton loves animals and cats in particular." ) return lang_en(text) @pytest.mark.parametrize( "text, svos_exp", [ ( "Burton loves cats.", [(["Burton"], ["loves"], ["cats"])], ), ( "One of my cats was eating food.", [(["One"], ["was", "eating"], ["food"])], ), ( "The first dog to arrive wins the treat.", [(["dog"], ["wins"], ["treat"])], ), ( "Burton was loved by cats.", [(["Burton"], ["was", "loved"], ["cats"])], ), ( "Food was eaten by my cat.", [(["Food"], ["was", "eaten"], ["cat"])], ), # NOTE: this case is failing in spaCy v3.4.1 # let's hide it for now so that tests pass overall # ( # "The treat was won by the first dog to arrive.", # [(["treat"], ["was", "won"], ["dog"])], # ), ( "He and I love house cats and big dogs.", [(["He", "I"], ["love"], ["house", "cats", "dogs"])], ), # NOTE: this case is failing as of spacy v3.5(?) # let's hide it for now so that tests pass overall # ( # "We do love and did hate small dogs.", # [(["We"], ["do", "love"], ["dogs"]), (["We"], ["did", "hate"], ["dogs"])], # ), ( "Rico eats food and plays fetch.", [(["Rico"], ["eats"], ["food"]), (["Rico"], ["plays"], ["fetch"])], ), # NOTE: this case is failing as of spacy v3.2 # let's hide it for now so that tests pass overall # ( # "What the cat did makes sense.", # [ # (["cat"], ["did"], ["What"]), # (["What", "the", "cat", "did"], ["makes"], ["sense"]), # ], # ), ( "What the cat wanted was eaten by the dog.", [ (["cat"], ["wanted"], ["What"]), (["What", "the", "cat", "wanted"], ["was", "eaten"], ["dog"]), ], ), ( "Burton and Nick do not expect to adopt another cat.", [ ( ["Burton", "Nick"], ["do", "not", "expect"], ["to", "adopt", "another", "cat"], ) ], ), ( "She and her friend did find, sell, and throw sea shells by the sea shore.", [ (["She", "friend"], ["did", "find"], ["sea", "shells"]), (["She", "friend"], ["sell"], ["sea", "shells"]), (["She", "friend"], ["throw"], ["sea", "shells"]), ], ), ], ) def test_subject_verb_object_triples(text, svos_exp, lang_en): doc = lang_en(text) svos = list(extract.subject_verb_object_triples(doc)) assert all( hasattr(svo, attr) for svo in svos for attr in ["subject", "verb", "object"] ) svos_obs = [ ( [tok.text for tok in subject], [tok.text for tok in verb], [tok.text for tok in object], ) for subject, verb, object in svos ] assert svos_obs == svos_exp @pytest.mark.parametrize( "entity, cue, fragment_len_range, exp", [ # NOTE: this case is failing in spaCy v3.4.1 # let's hide it for now so that tests pass overall # ( # "Burton", # "love", # None, # [ # (["Burton"], ["loves"], ["his", "cats", "Rico", "and", "Isaac"]), # (["Burton"], ["loved"], ["his", "cat", "Lucy"]), # (["Burton"], ["loves"], ["animals", "and", "cats"]), # ], # ), # ( # re.compile("Burton"), # "love", # None, # [ # (["Burton"], ["loves"], ["his", "cats", "Rico", "and", "Isaac"]), # (["Burton"], ["loved"], ["his", "cat", "Lucy"]), # (["Burton"], ["loves"], ["animals", "and", "cats"]), # ], # ), # ( # "Burton( DeWilde)?", # "love", # None, # [ # (["Burton", "DeWilde"], ["loves"], ["animals"]), # (["Burton"], ["loves"], ["his", "cats", "Rico", "and", "Isaac"]), # (["Burton"], ["loved"], ["his", "cat", "Lucy"]), # ( # ["Burton", "DeWilde"], # ["does", 
"not", "love"], # ["snakes", ",", "spiders", ",", "or", "moths"], # ), # (["Burton"], ["loves"], ["animals", "and", "cats"]), # ], # ), # ( # "Burton", # "love", # (None, 4), # [ # (["Burton"], ["loved"], ["his", "cat", "Lucy"]), # (["Burton"], ["loves"], ["animals", "and", "cats"]), # ], # ), # ( # "Burton", # "love", # (4, 6), # [(["Burton"], ["loves"], ["his", "cats", "Rico", "and", "Isaac"])], # ), ("Burton", "hate", None, []), ], ) def test_semistructured_statements(sss_doc, entity, cue, fragment_len_range, exp): obs = list( extract.semistructured_statements( sss_doc, entity=entity, cue=cue, fragment_len_range=fragment_len_range ) ) assert all( hasattr(sss, attr) for sss in obs for attr in ["entity", "cue", "fragment"] ) obs_text = [ ([tok.text for tok in e], [tok.text for tok in c], [tok.text for tok in f]) for e, c, f in obs ] assert obs_text == exp @pytest.mark.parametrize( "text, exp", [ ( 'Burton said, "I love cats!"', [(["Burton"], ["said"], '"I love cats!"')], ), # NOTE: this case is failing as of spacy v3.2 # let's hide it for now so that tests pass overall # ( # '"We love cats!" reply Burton and Nick.', # [(["Burton", "Nick"], ["reply"], '"We love cats!"')], # ), ( 'Burton explained from a podium. "I love cats," he said.', [(["he"], ["said"], '"I love cats,"')], ), ( '"I love cats!" insists Burton. "I absolutely do."', [ (["Burton"], ["insists"], '"I love cats!"'), (["Burton"], ["insists"], '"I absolutely do."'), ], ), ( '"Some people say otherwise," he conceded.', [(["he"], ["conceded"], '"Some people say otherwise,"')], ), ( 'Burton claims that his favorite book is "One Hundred Years of Solitude".', [], ), ( 'Burton thinks that cats are "cuties".', [], ), ], ) def test_direct_quotations(lang_en, text, exp): obs = list(extract.direct_quotations(lang_en(text))) assert all(hasattr(dq, attr) for dq in obs for attr in ["speaker", "cue", "content"]) obs_text = [ ([tok.text for tok in speaker], [tok.text for tok in cue], content.text) for speaker, cue, content in obs ] assert obs_text == exp @pytest.mark.parametrize( "text, exp", [ ( '"Me encantan los gatos", dijo Burtón.', [(["Burtón"], ["dijo"], '"Me encantan los gatos"')], ), ( "«Me encantan los gatos», afirmó Burtón.", [(["Burtón"], ["afirmó"], "«Me encantan los gatos»")], ), ], ) def test_direct_quotations_spanish(lang_es, text, exp): obs = extract.direct_quotations(lang_es(text)) obs_text = [ ([tok.text for tok in speaker], [tok.text for tok in cue], content.text) for speaker, cue, content in obs ] assert obs_text == exp
package fundamentals.stack.evaluation.lc150_evaluatereversepolishnotation; import java.util.Stack; /** * Evaluate the value of an arithmetic expression in Reverse Polish Notation. * Valid operators are +, -, *, /. Each operand may be an integer or another expression. * Some examples: * ["2", "1", "+", "3", "*"] -> ((2 + 1) * 3) -> 9 * ["4", "13", "5", "/", "+"] -> (4 + (13 / 5)) -> 6 */ public class Solution { public int evalRPN(String[] tokens) { Stack<Integer> s = new Stack<>(); for (String t : tokens) { if ("+".equals(t)) s.push(s.pop() + s.pop()); else if ("-".equals(t)) s.push(-s.pop() + s.pop()); else if ("*".equals(t)) s.push(s.pop() * s.pop()); else if ("/".equals(t)) { int div = s.pop(); s.push(s.pop() / div); } else s.push(Integer.parseInt(t)); } return s.pop(); } public int evalRPN3(String[] tokens) { Stack<Integer> s = new Stack<>(); for (String t : tokens) { switch (t) { case "+": s.push(s.pop() + s.pop()); break; case "*": s.push(s.pop() * s.pop()); break; case "-": s.push(-s.pop() + s.pop()); break; // nice trick! case "/": int div = s.pop(); s.push(s.pop() / div); break; default: s.push(Integer.valueOf(t)); break; } } return s.isEmpty() ? 0 : s.pop(); } // My 2AC: O(N) time and O(N) space. Assume the expression is valid. // Illegal char "1a", bad format ["/", '1'], div-by-zero ["1","0","/"] public int evalRPN2(String[] tokens) { Stack<Integer> stack = new Stack<>(); for (String t : tokens) { switch (t) { // Java 8! case "+": stack.push(stack.pop() + stack.pop()); break; case "-": int sub = stack.pop(); stack.push(stack.pop() - sub); break; case "*": stack.push(stack.pop() * stack.pop()); break; case "/": int div = stack.pop(); stack.push(stack.pop() / div); break; default: stack.push(Integer.valueOf(t)); break; } } return stack.isEmpty() ? 0 : stack.pop(); } // My 1AC: it's very dangerous to check '-' and '-1'. See nice string match in 2AC. public int evalRPN1(String[] tokens) { Stack<Integer> stack = new Stack<>(); for (int i = 0; i < tokens.length; i++) { String token = tokens[i]; // Must be number with/out sign if (token.length() > 1) { stack.push(Integer.valueOf(token)); continue; } // Could be operator or number char c = token.charAt(0); switch (c) { case '+': stack.push(stack.pop() + stack.pop()); break; case '*': stack.push(stack.pop() * stack.pop()); break; case '-': int sub1 = stack.pop(); int sub2 = stack.pop(); stack.push(sub2 - sub1); break; case '/': int div1 = stack.pop(); int div2 = stack.pop(); stack.push(div2 / div1); break; default: stack.push(Integer.valueOf(token)); break; } } return stack.isEmpty() ? 0 : stack.pop(); } }
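A quick driver for the class above, reusing the two worked examples from its own javadoc:

// Demo harness for Solution.evalRPN; expected values come from the javadoc above.
public class SolutionDemo {
    public static void main(String[] args) {
        Solution s = new Solution();
        System.out.println(s.evalRPN(new String[]{"2", "1", "+", "3", "*"}));  // 9
        System.out.println(s.evalRPN(new String[]{"4", "13", "5", "/", "+"})); // 6
    }
}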
/**
 * Write all the attributes (in JSON format) for the given HDF5 object out to the specified file.
 */
private void writeAttributesToFile(String filename, long objID, String printPrefix) throws Exception {
    File tmpFile = new File(filename);
    if (tmpFile.exists()) {
        throw new Exception("The given Attributes output file already exists: " + filename);
    }
    String attributesStr = getAttributes(objID, printPrefix);
    // Create any missing parent folders; getParentFile() avoids the
    // out-of-bounds substring when the path contains no separator.
    File parentDir = tmpFile.getParentFile();
    if (parentDir != null) {
        parentDir.mkdirs();
    }
    // try-with-resources guarantees the writer is closed even if print() throws.
    try (PrintWriter pw = new PrintWriter(filename)) {
        pw.print(attributesStr);
    }
}
package seedu.clinicio.model; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static seedu.clinicio.logic.commands.CommandTestUtil.VALID_ADDRESS_BOB; import static seedu.clinicio.logic.commands.CommandTestUtil.VALID_NAME_ADAM; import static seedu.clinicio.logic.commands.CommandTestUtil.VALID_NRIC_ALEX; import static seedu.clinicio.logic.commands.CommandTestUtil.VALID_PASSWORD_ADAM; import static seedu.clinicio.logic.commands.CommandTestUtil.VALID_PRICE_ORACORT; import static seedu.clinicio.logic.commands.CommandTestUtil.VALID_PRICE_PARACETAMOL; import static seedu.clinicio.logic.commands.CommandTestUtil.VALID_TAG_HUSBAND; import static seedu.clinicio.model.staff.Role.DOCTOR; import static seedu.clinicio.testutil.Assert.assertThrows; import static seedu.clinicio.testutil.TypicalPersons.ADAM; import static seedu.clinicio.testutil.TypicalPersons.ALEX; import static seedu.clinicio.testutil.TypicalPersons.ALEX_APPT; import static seedu.clinicio.testutil.TypicalPersons.ALICE; import static seedu.clinicio.testutil.TypicalPersons.BRYAN_APPT; import static seedu.clinicio.testutil.TypicalPersons.CARL_APPT; import static seedu.clinicio.testutil.TypicalPersons.CHLORPHENIRAMINE; import static seedu.clinicio.testutil.TypicalPersons.ORACORT; import static seedu.clinicio.testutil.TypicalPersons.PARACETAMOL; import static seedu.clinicio.testutil.TypicalPersons.VENTOLIN; import static seedu.clinicio.testutil.TypicalPersons.getTypicalClinicIo; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.List; import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; import javafx.collections.FXCollections; import javafx.collections.ObservableList; import seedu.clinicio.model.appointment.Appointment; import seedu.clinicio.model.appointment.exceptions.AppointmentClashException; import seedu.clinicio.model.appointment.exceptions.DuplicateAppointmentException; import seedu.clinicio.model.consultation.Consultation; import seedu.clinicio.model.medicine.Medicine; import seedu.clinicio.model.medicine.exceptions.DuplicateMedicineException; import seedu.clinicio.model.patient.Patient; import seedu.clinicio.model.patient.exceptions.DuplicatePatientException; import seedu.clinicio.model.person.Name; import seedu.clinicio.model.person.Person; import seedu.clinicio.model.person.exceptions.DuplicatePersonException; import seedu.clinicio.model.staff.Password; import seedu.clinicio.model.staff.Role; import seedu.clinicio.model.staff.Staff; import seedu.clinicio.model.staff.exceptions.DuplicateStaffException; import seedu.clinicio.model.staff.exceptions.StaffNotFoundException; import seedu.clinicio.testutil.AppointmentBuilder; import seedu.clinicio.testutil.MedicineBuilder; import seedu.clinicio.testutil.PatientBuilder; import seedu.clinicio.testutil.PersonBuilder; import seedu.clinicio.testutil.StaffBuilder; public class ClinicIoTest { @Rule public ExpectedException thrown = ExpectedException.none(); private final ClinicIo clinicIo = new ClinicIo(); @Test public void constructor() { assertEquals(Collections.emptyList(), clinicIo.getPersonList()); //@@author jjlee050 assertEquals(Collections.emptyList(), clinicIo.getPatientList()); //@@author jjlee050 assertEquals(Collections.emptyList(), clinicIo.getStaffList()); //@@author aaronseahyh assertEquals(Collections.emptyList(), clinicIo.getMedicineList()); //@@author gingivitiss 
assertEquals(Collections.emptyList(), clinicIo.getAppointmentList()); assertEquals(Collections.emptyList(), clinicIo.getConsultationList()); } @Test public void resetData_null_throwsNullPointerException() { thrown.expect(NullPointerException.class); clinicIo.resetData(null); } @Test public void resetData_withValidReadOnlyClinicIo_replacesData() { ClinicIo newData = getTypicalClinicIo(); clinicIo.resetData(newData); assertEquals(newData, clinicIo); } @Test public void resetData_withDuplicatePersons_throwsDuplicatePersonException() { // Two persons with the same identity fields Person editedAlice = new PersonBuilder(ALICE).withAddress(VALID_ADDRESS_BOB).withTags(VALID_TAG_HUSBAND) .build(); List<Appointment> newAppointments = Arrays.asList(BRYAN_APPT, ALEX_APPT); List<Medicine> newMedicines = Arrays.asList(PARACETAMOL, VENTOLIN, CHLORPHENIRAMINE, ORACORT); List<Person> newPersons = Arrays.asList(ALICE, editedAlice); ClinicIoStub newData = new ClinicIoStub(newAppointments, newPersons, new ArrayList<>(), new ArrayList<>(), newMedicines); thrown.expect(DuplicatePersonException.class); clinicIo.resetData(newData); } //@@author jjlee050 @Test public void resetData_withDuplicatePatients_throwsDuplicatePatientException() { // Two persons with the same identity fields Patient editedAlex = new PatientBuilder(ALEX).withAddress(VALID_ADDRESS_BOB) .build(); List<Patient> newPatients = Arrays.asList(ALEX, editedAlex); List<Medicine> newMedicines = Arrays.asList(PARACETAMOL, VENTOLIN, CHLORPHENIRAMINE, ORACORT); ClinicIoStub newData = new ClinicIoStub(new ArrayList<>(), new ArrayList<>(), newPatients, new ArrayList<>(), newMedicines); thrown.expect(DuplicatePatientException.class); clinicIo.resetData(newData); } //@@author jjlee050 @Test public void resetData_withDuplicateStaffs_throwsDuplicateStaffException() { // Two staff with the same identity fields List<Appointment> newAppointments = Arrays.asList(BRYAN_APPT, ALEX_APPT); List<Medicine> newMedicines = Arrays.asList(PARACETAMOL, VENTOLIN, CHLORPHENIRAMINE, ORACORT); Staff editedAdam = new StaffBuilder(ADAM).withName(VALID_NAME_ADAM).build(); List<Person> newPersons = Arrays.asList(ALICE); List<Staff> newStaffs = Arrays.asList(ADAM, editedAdam); ClinicIoStub newData = new ClinicIoStub(newAppointments, newPersons, new ArrayList<>(), newStaffs, newMedicines); thrown.expect(DuplicateStaffException.class); clinicIo.resetData(newData); } //@@author gingivitiss @Test public void resetData_withDuplicateAppointments_throwsDuplicateAppointmentException() { //Two appointments with the same identity fields Appointment editedAmy = new AppointmentBuilder(ALEX_APPT).withPatient(ALEX).build(); List<Appointment> newAppointments = Arrays.asList(ALEX_APPT, editedAmy); List<Medicine> newMedicines = Arrays.asList(PARACETAMOL, VENTOLIN, CHLORPHENIRAMINE, ORACORT); List<Person> newPersons = Arrays.asList(ALICE); List<Staff> newStaffs = Arrays.asList(ADAM); ClinicIoStub newData = new ClinicIoStub(newAppointments, newPersons, new ArrayList<>(), newStaffs, newMedicines); thrown.expect(DuplicateAppointmentException.class); clinicIo.resetData(newData); } @Test public void resetData_withClashingAppointments_throwsClashingAppointmentException() { //Two appointments with clashing slots Appointment clashingAppt = new AppointmentBuilder(CARL_APPT) .withTime(13, 30).build(); List<Appointment> newAppointments = Arrays.asList(ALEX_APPT, clashingAppt); List<Medicine> newMedicines = Arrays.asList(PARACETAMOL, VENTOLIN, CHLORPHENIRAMINE, ORACORT); List<Person> newPersons = 
                Arrays.asList(ALICE);
        List<Staff> newStaffs = Arrays.asList(ADAM);
        ClinicIoStub newData = new ClinicIoStub(newAppointments, newPersons, new ArrayList<>(),
                newStaffs, newMedicines);

        thrown.expect(AppointmentClashException.class);
        clinicIo.resetData(newData);
    }

    //@@author aaronseahyh
    @Test
    public void resetData_withDuplicateMedicines_throwsDuplicateMedicineException() {
        // Two medicines with the same identity fields
        List<Appointment> newAppointments = Arrays.asList(ALEX_APPT);
        Medicine editedMedicine = new MedicineBuilder(PARACETAMOL)
                .withMedicinePrice(VALID_PRICE_PARACETAMOL).build();
        List<Medicine> newMedicines = Arrays.asList(editedMedicine, PARACETAMOL, VENTOLIN,
                CHLORPHENIRAMINE, ORACORT);
        List<Person> newPersons = Arrays.asList(ALICE);
        List<Staff> newStaffs = Arrays.asList(ADAM);
        ClinicIoStub newData = new ClinicIoStub(newAppointments, newPersons, new ArrayList<>(),
                newStaffs, newMedicines);

        thrown.expect(DuplicateMedicineException.class);
        clinicIo.resetData(newData);
    }

    @Test
    public void hasPerson_nullPerson_throwsNullPointerException() {
        thrown.expect(NullPointerException.class);
        clinicIo.hasPerson(null);
    }

    //@@author jjlee050
    @Test
    public void hasPatient_nullPatient_throwsNullPointerException() {
        thrown.expect(NullPointerException.class);
        clinicIo.hasPatient(null);
    }

    //@@author jjlee050
    @Test
    public void hasStaff_nullStaff_throwsNullPointerException() {
        thrown.expect(NullPointerException.class);
        clinicIo.hasStaff(null);
    }

    //@@author gingivitiss
    @Test
    public void hasAppointment_nullAppointment_throwsNullPointerException() {
        thrown.expect(NullPointerException.class);
        clinicIo.hasAppointment(null);
    }

    //@@author aaronseahyh
    @Test
    public void hasMedicine_nullMedicine_throwsNullPointerException() {
        thrown.expect(NullPointerException.class);
        clinicIo.hasMedicine(null);
    }

    @Test
    public void hasConsultation_nullConsultation_throwsNullPointerException() {
        thrown.expect(NullPointerException.class);
        clinicIo.hasConsultation(null);
    }

    @Test
    public void hasPerson_personNotInClinicIo_returnsFalse() {
        assertFalse(clinicIo.hasPerson(ALICE));
    }

    //@@author jjlee050
    @Test
    public void hasPatient_patientNotInClinicIo_returnsFalse() {
        assertFalse(clinicIo.hasPatient(ALEX));
    }

    //@@author jjlee050
    @Test
    public void hasStaff_staffNotInClinicIo_returnsFalse() {
        assertFalse(clinicIo.hasStaff(ADAM));
    }

    //@@author gingivitiss
    @Test
    public void hasAppointment_appointmentNotInClinicIo_returnsFalse() {
        assertFalse(clinicIo.hasAppointment(ALEX_APPT));
    }

    //@@author aaronseahyh
    @Test
    public void hasMedicine_medicineNotInClinicIo_returnsFalse() {
        assertFalse(clinicIo.hasMedicine(ORACORT));
    }

    // TODO: Add consultation test case

    @Test
    public void hasPerson_personInClinicIo_returnsTrue() {
        clinicIo.addPerson(ALICE);
        assertTrue(clinicIo.hasPerson(ALICE));
    }

    //@@author jjlee050
    @Test
    public void hasPatient_patientInClinicIo_returnsTrue() {
        clinicIo.addPatient(ALEX);
        assertTrue(clinicIo.hasPatient(ALEX));
    }

    //@@author jjlee050
    @Test
    public void hasStaff_staffInClinicIo_returnsTrue() {
        clinicIo.addStaff(ADAM);
        assertTrue(clinicIo.hasStaff(ADAM));
    }

    //@@author gingivitiss
    @Test
    public void hasAppointment_appointmentInClinicIo_returnsTrue() {
        clinicIo.addAppointment(ALEX_APPT);
        assertTrue(clinicIo.hasAppointment(ALEX_APPT));
    }

    //@@author aaronseahyh
    @Test
    public void hasMedicine_medicineInClinicIo_returnsTrue() {
        clinicIo.addMedicine(PARACETAMOL);
        assertTrue(clinicIo.hasMedicine(PARACETAMOL));
    }

    // TODO: Add consultation test case

    @Test
    public void hasPerson_personWithSameIdentityFieldsInClinicIo_returnsTrue() {
        clinicIo.addPerson(ALICE);
        Person editedAlice = new PersonBuilder(ALICE).withAddress(VALID_ADDRESS_BOB)
                .withTags(VALID_TAG_HUSBAND).build();
        assertTrue(clinicIo.hasPerson(editedAlice));
    }

    //@@author jjlee050
    @Test
    public void hasPatient_patientWithSameIdentityFieldsInClinicIo_returnsTrue() {
        clinicIo.addPatient(ALEX);
        Patient editedAlex = new PatientBuilder(ALEX).withNric(VALID_NRIC_ALEX).build();
        assertTrue(clinicIo.hasPatient(editedAlex));
    }

    //@@author jjlee050
    @Test
    public void hasStaff_staffWithSameIdentityFieldsInClinicIo_returnsTrue() {
        clinicIo.addStaff(ADAM);
        Staff editedAdam = new StaffBuilder(ADAM).withRole(Role.RECEPTIONIST).build();
        assertTrue(clinicIo.hasStaff(editedAdam));
    }

    //@@author gingivitiss
    @Test
    public void hasAppointment_appointmentWithSameIdentityFieldsInClinicIo_returnsTrue() {
        clinicIo.addAppointment(ALEX_APPT);
        Appointment editedAmy = new AppointmentBuilder(ALEX_APPT).withTime(13, 00).build();
        assertTrue(clinicIo.hasAppointment(editedAmy));
    }

    @Test
    public void hasAppointment_appointmentWithDifferentIdentityFieldsInClinicIo_returnsFalse() {
        clinicIo.addAppointment(ALEX_APPT);
        Appointment editedAmy = new AppointmentBuilder(ALEX_APPT).withTime(12, 00).build();
        assertFalse(clinicIo.hasAppointment(editedAmy));
    }

    @Test
    public void hasAppointmentClash_appointmentWithSameTimingsInClinicIo_returnsTrue() {
        clinicIo.addAppointment(ALEX_APPT);
        Appointment newAppt = new AppointmentBuilder(CARL_APPT).withTime(13, 00).build();
        assertTrue(clinicIo.hasAppointmentClash(newAppt));
    }

    //@@author aaronseahyh
    @Test
    public void hasMedicine_medicineWithSameIdentityFieldsInClinicIo_returnsTrue() {
        clinicIo.addMedicine(PARACETAMOL);
        Medicine editedParacetamol = new MedicineBuilder(PARACETAMOL)
                .withMedicinePrice(VALID_PRICE_ORACORT).build();
        assertTrue(clinicIo.hasMedicine(editedParacetamol));
    }

    // TODO: Add consultation test case

    //@@author jjlee050
    @Test
    public void checkStaffCredentials_nullStaff_throwsNullPointerException() {
        thrown.expect(NullPointerException.class);
        clinicIo.checkStaffCredentials(null);
    }

    //@@author jjlee050
    @Test
    public void checkStaffCredentials_staffNotInClinicIo_throwStaffNotFoundException() {
        Staff staffToCheck = new Staff(DOCTOR, new Name(VALID_NAME_ADAM),
                new Password(VALID_PASSWORD_ADAM, false));
        assertThrows(StaffNotFoundException.class, () ->
                clinicIo.checkStaffCredentials(staffToCheck));
    }

    //@@author jjlee050
    @Test
    public void checkStaffCredentials_invalidStaff_returnFalse() {
        clinicIo.addStaff(ADAM);
        assertFalse(clinicIo.checkStaffCredentials(ADAM));
    }

    //@@author jjlee050
    @Test
    public void checkStaffCredentials_validStaff_returnTrue() {
        clinicIo.addStaff(ADAM);
        Staff staffToCheck = new Staff(DOCTOR, new Name(VALID_NAME_ADAM),
                new Password(VALID_PASSWORD_ADAM, false));
        assertTrue(clinicIo.checkStaffCredentials(staffToCheck));
    }

    @Test
    public void getPersonList_modifyList_throwsUnsupportedOperationException() {
        thrown.expect(UnsupportedOperationException.class);
        clinicIo.getPersonList().remove(0);
    }

    //@@author jjlee050
    @Test
    public void getPatientList_modifyList_throwsUnsupportedOperationException() {
        thrown.expect(UnsupportedOperationException.class);
        clinicIo.getPatientList().remove(0);
    }

    //@@author jjlee050
    @Test
    public void getStaffList_modifyList_throwsUnsupportedOperationException() {
        thrown.expect(UnsupportedOperationException.class);
        clinicIo.getStaffList().remove(0);
    }

    //@@author gingivitiss
    @Test
    public void getAppointmentList_modifyList_throwsUnsupportedOperationException() {
        thrown.expect(UnsupportedOperationException.class);
        clinicIo.getAppointmentList().remove(0);
    }

    //@@author aaronseahyh
    @Test
    public void getMedicineList_modifyList_throwsUnsupportedOperationException() {
        thrown.expect(UnsupportedOperationException.class);
        clinicIo.getMedicineList().remove(0);
    }

    @Test
    public void getConsultationList_modifyList_throwsUnsupportedOperationException() {
        thrown.expect(UnsupportedOperationException.class);
        clinicIo.getConsultationList().remove(0);
    }

    /**
     * A stub ReadOnlyClinicIo whose persons list and staff list can violate interface constraints.
     */
    private static class ClinicIoStub implements ReadOnlyClinicIo {
        private final ObservableList<Appointment> appointments = FXCollections.observableArrayList();
        private final ObservableList<Patient> patients = FXCollections.observableArrayList();
        private final ObservableList<Person> persons = FXCollections.observableArrayList();
        private final ObservableList<Staff> staffs = FXCollections.observableArrayList();
        private final ObservableList<Medicine> medicines = FXCollections.observableArrayList();
        private final ObservableList<Consultation> consultations = FXCollections.observableArrayList();

        ClinicIoStub(Collection<Appointment> appointments, Collection<Person> persons,
                Collection<Patient> patients, Collection<Staff> staffs,
                Collection<Medicine> medicines) {
            this.appointments.setAll(appointments);
            this.persons.setAll(persons);
            this.patients.setAll(patients);
            this.staffs.setAll(staffs);
            this.medicines.setAll(medicines);
            // no consultations are supplied, so the consultations list stays empty
        }

        @Override
        public ObservableList<Patient> getQueue() {
            return patients;
        }

        @Override
        public ObservableList<Person> getPersonList() {
            return persons;
        }

        @Override
        public ObservableList<Patient> getPatientList() {
            return patients;
        }

        @Override
        public ObservableList<Staff> getStaffList() {
            return staffs;
        }

        @Override
        public ObservableList<Appointment> getAppointmentList() {
            return appointments;
        }

        @Override
        public ObservableList<Medicine> getMedicineList() {
            return medicines;
        }

        @Override
        public ObservableList<Consultation> getConsultationList() {
            return consultations;
        }
    }
}
// node_modules/antd-mobile/lib/list/style/index.native.d.ts
declare const _default: {
    underlayColor: {
        backgroundColor: string;
    };
    Header: {
        fontSize: number;
        color: string;
        paddingHorizontal: number;
        paddingTop: number;
        paddingBottom: number;
        backgroundColor: string;
    };
    Footer: {
        fontSize: number;
        color: string;
        paddingHorizontal: number;
        paddingVertical: number;
        backgroundColor: string;
    };
    Body: {
        backgroundColor: string;
        borderTopWidth: number;
        borderTopColor: string;
    };
    BodyBottomLine: {
        position: string;
        bottom: number;
        left: number;
        right: number;
        height: number;
        backgroundColor: string;
        borderBottomWidth: number;
        borderBottomColor: string;
    };
    Item: {
        flexGrow: number;
        alignItems: string;
        flexDirection: string;
        paddingLeft: number;
        backgroundColor: string;
    };
    Line: {
        flex: number;
        flexDirection: string;
        alignItems: string;
        paddingRight: number;
        paddingVertical: number;
        minHeight: number;
        borderBottomWidth: number;
        borderBottomColor: string;
    };
    Thumb: {
        width: number;
        height: number;
        marginRight: number;
    };
    Content: {
        color: string;
        fontSize: number;
        textAlignVertical: string;
    };
    Extra: {
        color: string;
        fontSize: number;
        textAlign: string;
        textAlignVertical: string;
    };
    Brief: {
        minHeight: number;
    };
    BriefText: {
        color: string;
        fontSize: number;
        paddingTop: number;
        textAlignVertical: string;
    };
    Arrow: {
        width: number;
        height: number;
        marginLeft: number;
        marginTop: number;
    };
    ArrowV: {
        width: number;
        height: number;
        marginLeft: number;
    };
    multipleLine: {
        paddingVertical: number;
    };
    multipleThumb: {
        width: number;
        height: number;
    };
    column: {
        flex: number;
        flexDirection: string;
    };
};
export default _default;
def predict_proba(self, test_df):
    # one feature column per fitted estimator in the ensemble
    cols = ['estimator_' + str(i) for i in range(1, self.n_est + 1)]
    test_tree = pd.DataFrame(
        np.column_stack([est.predict(test_df[self.x]) for est in self.tree_model.estimators_]),
        columns=cols)
    return self._predict(test_tree)
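# --- Hedged usage sketch (not part of the original class) --------------------
# predict_proba above assumes self.tree_model is a fitted sklearn-style
# ensemble; the core trick is turning each estimator's prediction into one
# feature column. A self-contained illustration of that step with made-up data:
if __name__ == "__main__":
    import numpy as np
    import pandas as pd
    from sklearn.ensemble import RandomForestRegressor

    rng = np.random.default_rng(0)
    X = pd.DataFrame(rng.normal(size=(100, 3)), columns=["a", "b", "c"])
    y = X["a"] * 2 + rng.normal(size=100)

    forest = RandomForestRegressor(n_estimators=5, random_state=0).fit(X, y)
    cols = ["estimator_" + str(i) for i in range(1, 6)]
    per_tree = pd.DataFrame(
        np.column_stack([est.predict(X.values) for est in forest.estimators_]),
        columns=cols)
    print(per_tree.head())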
import { ControlValueAccessor } from '@angular/forms';
import { Input } from '@angular/core';
import { OptionsProvider } from './providers/options-provider';

export class TagModelClass {
    [key: string]: any;
}

export type TagModel = string | TagModelClass;

export function isObject(obj: any): boolean {
    return obj === Object(obj);
}

export class TagInputAccessor implements ControlValueAccessor {
    private _items: TagModel[] = [];
    private _onTouchedCallback: () => void;
    private _onChangeCallback: (items: TagModel[]) => void;

    /**
     * @name displayBy
     */
    @Input() public displayBy: string = OptionsProvider.defaults.tagInput.displayBy;

    /**
     * @name identifyBy
     */
    @Input() public identifyBy: string = OptionsProvider.defaults.tagInput.identifyBy;

    /**
     * Flag to toggle displaying the value of identifyBy or not.
     * If true, display item as <identifyBy>: <displayBy>
     * Else display item as <displayBy>
     *
     * @type {boolean}
     * @memberof TagInputAccessor
     */
    @Input() public withCode: boolean = OptionsProvider.defaults.tagInput.withCode;

    public get items(): TagModel[] {
        return this._items;
    }

    public set items(items: TagModel[]) {
        this._items = items;
        this._onChangeCallback(this._items);
    }

    public onTouched() {
        this._onTouchedCallback();
    }

    public writeValue(items: any[]) {
        if (!items || items.length === 0) {
            this._items = [];
        } else {
            this._items = this.filterItems(items);
        }
    }

    public registerOnChange(fn: any) {
        this._onChangeCallback = fn;
    }

    public registerOnTouched(fn: any) {
        this._onTouchedCallback = fn;
    }

    /**
     * @name getItemValue
     * @param item
     */
    public getItemValue(item: TagModel): string {
        return isObject(item) ? item[this.identifyBy] : item;
    }

    /**
     * @name getItemDisplay
     * @param item
     */
    public getItemDisplay(item: TagModel): string {
        if (isObject(item)) {
            if (!item[this.displayBy]) {
                if (!item[this.identifyBy]) {
                    return '';
                }
                return item[this.identifyBy];
            }
            return item[this.displayBy];
        }
        return item.toString();
    }

    /**
     * @name getItemsWithout
     * @param index
     */
    protected getItemsWithout(index: number): TagModel[] {
        return this.items.filter((item, position) => position !== index);
    }

    /**
     * Filter the list of items.
     * Remove all items where
     * - item[this.identifyBy] is undefined
     * - item[this.identifyBy] AND item[this.displayBy] are undefined
     *
     * @private
     * @param {any[]} items
     * @returns
     * @memberof TagInputAccessor
     */
    private filterItems(items: any[]) {
        return items
            .map(item => {
                if (typeof item === 'string') {
                    return item;
                } else if (!item[this.identifyBy]) {
                    // Set both identifyBy and displayBy values
                    // to undefined to filter later
                    return {
                        [this.identifyBy]: item[this.identifyBy],
                        [this.displayBy]: item[this.identifyBy]
                    };
                } else {
                    return item;
                }
            })
            .filter(item => {
                if (!item[this.identifyBy] && !item[this.displayBy]) {
                    return false;
                }
                return true;
            });
    }
}
NEW DELHI: ‘This is the longest pronouncement from a person who does not otherwise take much space,’ Justice Rohinton Fali Nariman said before delivering the landmark verdict declaring Section 66A of the IT Act unconstitutional.

Justice Nariman, who left his booming law practice to become a Supreme Court judge on July 7 last year, is known for his expertise in constitutional, corporate and civil law.

An ordained priest from the Bandra Agiary, Nariman is passionate about western classical music. He is known for delivering short and precise judgments, but considering the importance of the issue, his judgment on Section 66A ran to 123 pages.

In another landmark verdict, Justice Nariman held that a death convict’s plea for review of the Supreme Court’s judgment upholding the death penalty should be heard in open court by a bench of at least three judges. Before that, all review petitions were decided in chambers without the presence of advocates.

Justice Nariman completed his bachelor’s degree in commerce at the prestigious Shri Ram College of Commerce and graduated in law from the Faculty of Law, University of Delhi, where he was the second topper in the university. He earned a master’s degree from Harvard Law School. He was solicitor general of India from July 27, 2011 to February 4, 2013.

He was designated a senior advocate by then Chief Justice of India M N Venkatachalaiah, who had to amend the rules to confer the recognition on him at the young age of 37, against the norm that required an advocate to be at least 45 years old.

Son of the celebrated constitutional expert and much respected lawyer Fali S Nariman, Rohinton does not like to bask in his father’s reflected glory. He enjoys his country home, where he finds the environment conducive to listening to western classical music and reading books.

He assisted the eminent lawyer Nani Palkhivala for a month in presenting arguments in the famous Minerva Mills case of 1980. The judgment in that case extended the Constitution’s “basic structure doctrine”.
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Red Hat, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Written by <NAME> <<EMAIL>>

import koji

from unittest.mock import patch, MagicMock
import queue

from freshmaker import db
from freshmaker.events import ErrataAdvisoryRPMsSignedEvent
from freshmaker.models import ArtifactBuild, Event
from freshmaker.types import EventState, ArtifactBuildState
from freshmaker.producer import FreshmakerProducer, _sa_disconnect_exceptions
from tests import helpers


class TestCheckUnfinishedKojiTasks(helpers.ModelsTestCase):

    def setUp(self):
        super(TestCheckUnfinishedKojiTasks, self).setUp()

        self.koji_read_config_patcher = patch(
            'koji.read_config', return_value={'server': 'http://localhost/'})
        self.koji_read_config_patcher.start()

        db_event = Event.get_or_create(
            db.session, "msg1", "current_event", ErrataAdvisoryRPMsSignedEvent)
        db_event.state = EventState.BUILDING
        self.build = ArtifactBuild.create(db.session, db_event, "parent1-1-4", "image")
        self.build.state = ArtifactBuildState.BUILD
        self.build.build_id = 10
        db.session.commit()

    def tearDown(self):
        self.koji_read_config_patcher.stop()

    @patch('freshmaker.kojiservice.KojiService.get_task_info')
    @patch('freshmaker.consumer.get_global_consumer')
    def test_koji_task_failed(self, global_consumer, get_task_info):
        consumer = self.create_consumer()
        global_consumer.return_value = consumer
        get_task_info.return_value = {'state': koji.TASK_STATES['FAILED']}

        hub = MagicMock()
        producer = FreshmakerProducer(hub)
        producer.check_unfinished_koji_tasks(db.session)
        event = consumer.incoming.get()
        self.assertEqual(event.task_id, 10)
        self.assertEqual(event.new_state, "FAILED")

    @patch('freshmaker.kojiservice.KojiService.get_task_info')
    @patch('freshmaker.consumer.get_global_consumer')
    def test_koji_task_closed(self, global_consumer, get_task_info):
        consumer = self.create_consumer()
        global_consumer.return_value = consumer
        get_task_info.return_value = {'state': koji.TASK_STATES['CLOSED']}

        hub = MagicMock()
        producer = FreshmakerProducer(hub)
        producer.check_unfinished_koji_tasks(db.session)
        event = consumer.incoming.get()
        self.assertEqual(event.task_id, 10)
        self.assertEqual(event.new_state, "CLOSED")

    @patch('freshmaker.kojiservice.KojiService.get_task_info')
    @patch('freshmaker.consumer.get_global_consumer')
    def test_koji_task_dry_run(self, global_consumer, get_task_info):
        self.build.build_id = -10
        consumer = self.create_consumer()
        global_consumer.return_value = consumer
        get_task_info.return_value = {'state': koji.TASK_STATES['CLOSED']}

        hub = MagicMock()
        producer = FreshmakerProducer(hub)
        producer.check_unfinished_koji_tasks(db.session)
        self.assertRaises(queue.Empty, consumer.incoming.get, block=False)

    @patch('freshmaker.kojiservice.KojiService.get_task_info')
    @patch('freshmaker.consumer.get_global_consumer')
    def test_koji_task_open(self, global_consumer, get_task_info):
        self.build.build_id = -10
        consumer = self.create_consumer()
        global_consumer.return_value = consumer
        get_task_info.return_value = {'state': koji.TASK_STATES['OPEN']}

        hub = MagicMock()
        producer = FreshmakerProducer(hub)
        producer.check_unfinished_koji_tasks(db.session)
        self.assertRaises(queue.Empty, consumer.incoming.get, block=False)

    @patch('freshmaker.kojiservice.KojiService.get_task_info')
    @patch('freshmaker.consumer.get_global_consumer')
    def test_koji_invalid_request(self, global_consumer, get_task_info):
        from sqlalchemy import select
        self.build.build_id = -10
        consumer = self.create_consumer()
        global_consumer.return_value = consumer
        get_task_info.return_value = {'state': koji.TASK_STATES['OPEN']}

        hub = MagicMock()
        producer = FreshmakerProducer(hub)
        # make new session to hold new connection and transaction
        my_session = db.session()
        # create connection and begin new transaction
        my_connection = my_session.connection()
        my_connection.begin()
        # invalidate one connection to simulate disconnect from DB
        my_connection.invalidate()
        # check if it will raise Statement Error
        with self.assertRaises(_sa_disconnect_exceptions):
            producer.check_unfinished_koji_tasks(my_session)
        # rollback session, and rollback invalid transaction inside it
        my_session.rollback()
        # check that new connection is created in our session
        self.assertIsNot(my_connection, my_session.connection())
        # check if connection to db is established again
        my_session.connection().scalar(select([1]))
        self.assertFalse(my_session.connection().invalidated)
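# --- Hedged usage sketch (not part of the original test module) ---------------
# The disconnect-recovery pattern exercised by test_koji_invalid_request,
# reduced to plain SQLAlchemy against an in-memory SQLite database:
if __name__ == "__main__":
    from sqlalchemy import create_engine, text
    from sqlalchemy.orm import sessionmaker

    session = sessionmaker(bind=create_engine("sqlite://"))()

    connection = session.connection()   # opens a connection and a transaction
    connection.invalidate()             # simulate a dropped DB connection

    session.rollback()                  # discard the broken transaction
    assert session.connection() is not connection   # a fresh connection is used
    session.connection().scalar(text("SELECT 1"))   # the DB is reachable again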
/**
 * Consumes any updates waiting in the merger without notifying the underlying listener.
 */
public void reset() {
    _mergerLock.lock();
    try {
        _callQueue.clear();
        _currentResultCompilationIndex = -1;
        _resultIndex = -1;
        _futureResultCompilationIndex = -1;
        getCycleRetainer().replaceRetainedCycle(null);
    } finally {
        _mergerLock.unlock();
    }
}
Consulting the cost

A breakdown of some of the $18.63 million the City of Saskatoon spent on consultants in 2015:

$3.98 million on the two P3 projects, the north commuter bridge/Traffic bridge and the civic operations centre
$1.89 million on water projects, including $1.15 million on the 42nd Street reservoir project
$2.64 million on interchanges and bridges
$7.43 million on other capital projects, like the city’s growth plan ($543,000) and the Remai Modern Art Gallery of Saskatchewan ($585,000)
$600,000 for communications and marketing
$321,000 on counselling services
$213,000 on internal audit services that were outsourced
$139,000 on website support

The City of Saskatoon spent $18.63 million on external consultants in 2015 — more than the City of Toronto spent in any of the previous five years, according to each city’s numbers. Saskatoon’s price tag was also higher than the $11.33 million total the City of Regina spent on outside consultants over the six years from 2009 to 2015.

However, the City of Saskatoon’s chief financial officer said residents are receiving good value for the money and that it’s cheaper to hire experts when you need them rather than pay for permanent full-time staff. “It’s very cost effective,” Kerry Tarasoff said in an interview this week.

According to a report on its way to Monday’s finance committee meeting, the city spent just under $16 million on consultants for capital projects and another $2.68 million for operations. The report defines a consultant as a professional who is contracted to offer expert advice. The report is a response to criticism of the city’s use of consultants in January by veteran Coun. Pat Lorje, who said city hall was becoming “consultant crazy.”

“My jaw dropped,” Lorje said in an interview Wednesday after seeing the report. “I knew that things were getting a little out of hand, but I had no idea. Why don’t we just privatize the entire city?”

Tarasoff said he could not account for the vast difference between Saskatoon and other cities like Toronto and Regina. Saskatoon did, however, contract out some services like internal auditing and included it on the list of consultants, he said. The city paid internal auditing consultants PricewaterhouseCoopers $213,000 in 2015. Tarasoff said different cities may define consulting in different ways, making comparisons difficult.

“You need the expertise,” he said. “You need the information. You need advice. We don’t have it. To me, there’s a risk if you don’t do it. Are we prepared to take on that risk? No, I think we’re prepared to spend a little money and get advice that’s going to be professional, and it fills a void that we don’t have.”

Tarasoff did not have consulting spending from other years to compare with 2015, but said it was slightly higher than usual. The total number of consulting payments in 2015 was 229. The City of Regina, by comparison, lists 20 consultants for 2015. The City of Toronto lists 144 payments to consultants in 2014, the most recent year available on the city’s website. Toronto’s website updates a full list of consultants and payments each year.

The City of Saskatoon spent just under $4 million on consultants in 2015 for its two big P3 projects, the civic operations centre and the venture to build two new bridges. “Engineers, for example, building bridges and interchanges — we just don’t have them sitting in the back closet waiting and pull them out when you need them,” Tarasoff said.

Lorje remained unconvinced and said the spending is “out of whack” with other cities. The city’s reliance on consultants is growing, she added. “What are our (city hall) staff doing?” she asked. “It’s absurd. We have been building interchanges and bridges for years in this city. Have we learned nothing?”

The city employed 127 people with the title of either engineer or engineering technologist in 2014. The report says the city uses consultants for outside expertise in specialized fields like management, engineering, accounting, law, human resources and marketing. In 2014, the city employed seven accountants and another 54 people with accounting in their title, like clerks; 11 solicitors and one lawyer; 10 human resources consultants; and nine marketing managers or directors. In total, the city employed 3,526 people in 2014.

By comparison, Regina spent just over $1.75 million on consultants in 2015. The most the City of Toronto spent on consultants from 2010 to 2014 was $18.3 million in 2012 — still less than Saskatoon in 2015.

“When you take it into context of a billion-dollar budget, $18 million is not a lot of money,” Tarasoff said.
/** The jessop ScriptEngine class.
 *
 * @author knoxg
 */
public class JessopScriptEngine extends AbstractScriptEngine implements Compilable {

    /** Logger instance for this class */
    Logger logger = Logger.getLogger(JessopScriptEngine.class);

    /** ScriptEngineFactory that created this class */
    ScriptEngineFactory factory;

    /** Reserved key for a named value that identifies the initial language used for jessop scripts.
     *  If not set, will default to 'javascript' */
    public static final String JESSOP_LANGUAGE = "com.randommoun.common.jessop.language";

    /** Default value for the JESSOP_LANGUAGE key; has the value "javascript" */
    public static final String JESSOP_DEFAULT_LANGUAGE = "javascript";

    /** Reserved key for a named value that identifies the initial ScriptEngine used for jessop scripts.
     *  If not set, will use the default engine for the default language. */
    public static final String JESSOP_ENGINE = "com.randommoun.common.jessop.engine";

    /** Default value for the JESSOP_ENGINE key; has the value "rhino" */
    public static final String JESSOP_DEFAULT_ENGINE = "rhino";

    /** Reserved key for a named value that sets the initial exception converter.
     *  If not set, will use the default converter for the default language */
    public static final String JESSOP_EXCEPTION_CONVERTER = "com.randommoun.common.jessop.exceptionConverter";

    /** Default value for the JESSOP_EXCEPTION_CONVERTER key; has the value null */
    public static final String JESSOP_DEFAULT_EXCEPTION_CONVERTER = null;

    /** Reserved key for a named value that sets the initial bindings converter.
     *  If not set, will use the default converter for the default language */
    public static final String JESSOP_BINDINGS_CONVERTER = "com.randommoun.common.jessop.bindingsConverter";

    /** Default value for the JESSOP_BINDINGS_CONVERTER key; has the value null */
    // so this isn't final any more, since it might change depending on what's on the classpath
    public static String JESSOP_DEFAULT_BINDINGS_CONVERTER; // = "com.randomnoun.common.jessop.engine.jvmRhino.JvmRhinoBindingsConverter";

    static {
        JavascriptJessopScriptBuilder jjsb = new JavascriptJessopScriptBuilder();
        JESSOP_DEFAULT_BINDINGS_CONVERTER = jjsb.getDefaultBindingsConverterClassName();
    }

    /** Reserved key for a named value that controls whether the target script is compiled
     *  (providing the target engine allows it).
     *  If not set, will default to 'false'. */
    public static final String JESSOP_COMPILE_TARGET = "com.randommoun.common.jessop.compileTarget";

    /** Default value for the JESSOP_COMPILE_TARGET key; has the value "false" */
    public static final String JESSOP_DEFAULT_COMPILE_TARGET = "false";

    /** Reserved key for a named value that controls whether the target script
     *  will have EOLs suppressed after scriptlets that appear at the end of a line.
     *  If not set, will default to 'false'. */
    public static final String JESSOP_SUPPRESS_EOL = "com.randommoun.common.jessop.suppressEol";

    /** Default value for the JESSOP_SUPPRESS_EOL key; has the value "false" */
    public static final String JESSOP_DEFAULT_SUPPRESS_EOL = "false";

    // so I guess we implement this twice then
    // let's always compile it if we can

    /** {@inheritDoc} */
    @Override
    public Object eval(String script, ScriptContext context) throws ScriptException {
        CompiledScript cscript = compile(script);
        return cscript.eval(context);
    }

    /** {@inheritDoc} */
    @Override
    public Object eval(Reader reader, ScriptContext context) throws ScriptException {
        CompiledScript cscript = compile(reader);
        return cscript.eval(context);
    }

    // if the user doesn't supply a context, we evaluate it with a null context (not the default
    // context); the JessopCompiledScript will then use the appropriate language's default context instead

    /** {@inheritDoc} */
    @Override
    public Object eval(String script) throws ScriptException {
        return eval(script, (ScriptContext) null);
    }

    /** {@inheritDoc} */
    @Override
    public Object eval(Reader reader) throws ScriptException {
        return eval(reader, (ScriptContext) null);
    }

    /** {@inheritDoc} */
    @Override
    public Bindings createBindings() {
        return new SimpleBindings();
    }

    /** {@inheritDoc} */
    @Override
    public ScriptEngineFactory getFactory() {
        if (factory != null) {
            return factory;
        } else {
            return new JessopScriptEngineFactory();
        }
    }

    // package private; called by ScriptEngineFactory only
    void setEngineFactory(ScriptEngineFactory fac) {
        factory = fac;
    }

    /** {@inheritDoc} */
    @Override
    public CompiledScript compile(String script) throws ScriptException {
        return compile(new StringReader(script));
    }

    /** {@inheritDoc} */
    @Override
    public CompiledScript compile(Reader script) throws ScriptException {
        try {
            // if (initialLanguage == null) { initialLanguage = "javascript"; }
            JessopDeclarations declarations = new JessopDeclarations();

            // jessop defaults
            declarations.setEngine(JESSOP_DEFAULT_ENGINE);
            declarations.setExceptionConverter(JESSOP_DEFAULT_EXCEPTION_CONVERTER);
            declarations.setBindingsConverter(JESSOP_DEFAULT_BINDINGS_CONVERTER);
            declarations.setCompileTarget(Boolean.valueOf(JESSOP_DEFAULT_COMPILE_TARGET));
            declarations.setSuppressEol(Boolean.valueOf(JESSOP_DEFAULT_SUPPRESS_EOL));

            // ScriptEngine defaults
            String filename = (String) get(ScriptEngine.FILENAME);
            String initialLanguage = (String) get(JessopScriptEngine.JESSOP_LANGUAGE);
            String initialEngine = (String) get(JessopScriptEngine.JESSOP_ENGINE);
            String initialExceptionConverter = (String) get(JessopScriptEngine.JESSOP_EXCEPTION_CONVERTER);
            String initialBindingsConverter = (String) get(JessopScriptEngine.JESSOP_BINDINGS_CONVERTER);
            String initialCompileTarget = (String) get(JessopScriptEngine.JESSOP_COMPILE_TARGET);
            String initialSuppressEol = (String) get(JessopScriptEngine.JESSOP_SUPPRESS_EOL);
            if (initialLanguage == null) { initialLanguage = JESSOP_DEFAULT_LANGUAGE; }
            if (filename != null) { declarations.setFilename(filename); }
            if (initialEngine != null) { declarations.setEngine(initialEngine); }
            if (initialExceptionConverter != null) { declarations.setExceptionConverter(initialExceptionConverter); }
            if (initialBindingsConverter != null) { declarations.setBindingsConverter(initialBindingsConverter); }
            if (initialCompileTarget != null) { declarations.setCompileTarget(Boolean.valueOf(initialCompileTarget)); }
            if (initialSuppressEol != null) { declarations.setSuppressEol(Boolean.valueOf(initialSuppressEol)); }

            ByteArrayOutputStream baos = new ByteArrayOutputStream();
            PrintWriter pw = new PrintWriter(baos);

            // new JavascriptJessopScriptBuilder(); // default for now
            JessopScriptBuilder jsb = ((JessopScriptEngineFactory) getFactory())
                .getJessopScriptBuilderForLanguage(initialLanguage);
            jsb.setPrintWriter(pw);
            Tokeniser t = new Tokeniser(this, jsb);
            jsb.setTokeniserAndDeclarations(t, declarations);

            // tokenise the script
            int ch = script.read();
            while (ch != -1) {
                t.parseChar((char) ch);
                ch = script.read();
            }
            t.parseEndOfFile();
            pw.flush();

            // get the output from the PrintWriter
            String newScript = baos.toString();

            // the final JSB contains the final declarations that were in effect
            declarations = t.jsb.getDeclarations();

            // get this from the jessop declaration eventually, but for now:
            // if the underlying engine supports compilation, then compile that here,
            // otherwise just store the source
            ScriptEngine engine = new ScriptEngineManager().getEngineByName(declarations.engine); // nashorn in JDK9
            if (engine == null) {
                throw new ScriptException("java.scriptx engine '" + declarations.engine + "' not found");
            }

            JessopBindingsConverter jbc = null;
            if (declarations.bindingsConverter != null && !declarations.bindingsConverter.equals("")) {
                try {
                    jbc = (JessopBindingsConverter)
                        Class.forName(declarations.bindingsConverter).newInstance();
                } catch (Exception e) {
                    throw (ScriptException) new ScriptException(
                        "bindingsConverter '" + declarations.bindingsConverter + "' not loaded").initCause(e);
                }
            }

            JessopExceptionConverter jec = null;
            if (declarations.exceptionConverter != null && !declarations.exceptionConverter.equals("")) {
                try {
                    jec = (JessopExceptionConverter)
                        Class.forName(declarations.exceptionConverter).newInstance();
                } catch (Exception e) {
                    throw (ScriptException) new ScriptException(
                        "exceptionConverter '" + declarations.exceptionConverter + "' not loaded").initCause(e);
                }
            }

            // com.sun.script.javascript.RhinoScriptEngine m = (com.sun.script.javascript.RhinoScriptEngine) engine;
            // the newScript is compiled here, if the engine supports it
            return new JessopCompiledScript(engine, declarations.isCompileTarget(),
                declarations.getFilename(), newScript, jec, jbc);

        } catch (IOException ioe) {
            throw new ScriptException(ioe);
        }
    }
}
/**
 * Dispatch an event that has occurred to all registered observers.
 * @param observableEvent the observable event to dispatch.
 */
public void eventOccurred(final T observableEvent) {
    // snapshot the observer list under the lock so observers can be
    // added or removed while the event is being dispatched
    List<Observer<T>> clone = null;
    synchronized (observerList) {
        clone = new ArrayList<Observer<T>>();
        clone.addAll(observerList);
    }
    for (Observer<T> listener : clone) {
        listener.eventOccurred(observableEvent);
    }
}
A total of 51 patients with the subclavian steal syndrome (SSS) were examined; the clinical picture was compared with angiography and ultrasound Dopplerography (USDG) findings. The clinical manifestations of cerebral circulatory disorders were studied not only in isolated SSS but also in its different variants realized in association with concomitant lesions of the other branches of the aortic arch. Rarely occurring variants of the SSS were revealed; some of them were described for the first time. The types of collateral circulation that compensate for arm blood supply in the SSS were specified using the authors' material. The studies confirmed that USDG is a fairly informative method for diagnosing the SSS.
/** Read a Grid from a file in ESRI ASCII format.
 * @param filePath The path to the file to be read.
 * @param progressIndicator A ProgressIndicator to inform about the progress.
 * @return The read grid.
 */
public static GeoGrid read(String filePath, ProgressIndicator progressIndicator)
        throws java.io.IOException {
    File file = new File(filePath);
    InputStream fis = new FileInputStream(file.getAbsolutePath());
    EsriASCIIGridReader esriReader = new EsriASCIIGridReader();
    GeoGrid grid = esriReader.read(fis, progressIndicator);
    if (progressIndicator != null && progressIndicator.isAborted()) {
        return null;
    }
    String name = file.getName();
    if (!"".equals(name)) {
        grid.setName(name);
    }
    return grid;
}
def check_series_of_event(series_of_event, has_series, no_series_error):
    errors = []
    if len(series_of_event) > 1:
        errors.append(more_series())
    if not series_of_event and has_series:
        errors.append(series_not_found())
    if not has_series and no_series_error:
        errors.append(no_series())
    return errors
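# --- Hedged usage sketch (not part of the original module) --------------------
# more_series, series_not_found and no_series are assumed to be error factories
# defined elsewhere; with simple string stand-ins the three branches look like:
if __name__ == "__main__":
    more_series = lambda: "more than one series matched"
    series_not_found = lambda: "series expected but not found"
    no_series = lambda: "event must not belong to a series"

    print(check_series_of_event(["s1", "s2"], has_series=True, no_series_error=False))
    print(check_series_of_event([], has_series=True, no_series_error=False))
    print(check_series_of_event([], has_series=False, no_series_error=True))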
package contentful

import (
	"encoding/json"
	"time"
)

// FieldValidation interface
type FieldValidation interface{}

// FieldValidationLink model
type FieldValidationLink struct {
	LinkContentType []string `json:"linkContentType,omitempty"`
}

const (
	// MimeTypeAttachment mime type validation for content type field
	MimeTypeAttachment = "attachment"
	// MimeTypePlainText mime type validation for content type field
	MimeTypePlainText = "plaintext"
	// MimeTypeImage mime type validation for content type field
	MimeTypeImage = "image"
	// MimeTypeAudio mime type validation for content type field
	MimeTypeAudio = "audio"
	// MimeTypeVideo mime type validation for content type field
	MimeTypeVideo = "video"
	// MimeTypeRichText mime type validation for content type field
	MimeTypeRichText = "richtext"
	// MimeTypePresentation mime type validation for content type field
	MimeTypePresentation = "presentation"
	// MimeTypeSpreadSheet mime type validation for content type field
	MimeTypeSpreadSheet = "spreadsheet"
	// MimeTypePDF mime type validation for content type field
	MimeTypePDF = "pdfdocument"
	// MimeTypeArchive mime type validation for content type field
	MimeTypeArchive = "archive"
	// MimeTypeCode mime type validation for content type field
	MimeTypeCode = "code"
	// MimeTypeMarkup mime type validation for content type field
	MimeTypeMarkup = "markup"
)

// FieldValidationMimeType model
type FieldValidationMimeType struct {
	MimeTypes []string `json:"linkMimetypeGroup,omitempty"`
}

// MinMax model
type MinMax struct {
	Min float64 `json:"min,omitempty"`
	Max float64 `json:"max,omitempty"`
}

// DateMinMax model
type DateMinMax struct {
	Min time.Time `json:"min,omitempty"`
	Max time.Time `json:"max,omitempty"`
}

// FieldValidationDimension model
type FieldValidationDimension struct {
	Width        *MinMax `json:"width,omitempty"`
	Height       *MinMax `json:"height,omitempty"`
	ErrorMessage string  `json:"message,omitempty"`
}

// MarshalJSON for custom json marshaling
func (v *FieldValidationDimension) MarshalJSON() ([]byte, error) {
	type dimension struct {
		Width  *MinMax `json:"width,omitempty"`
		Height *MinMax `json:"height,omitempty"`
	}

	return json.Marshal(&struct {
		AssetImageDimensions *dimension `json:"assetImageDimensions,omitempty"`
		Message              string     `json:"message,omitempty"`
	}{
		AssetImageDimensions: &dimension{
			Width:  v.Width,
			Height: v.Height,
		},
		Message: v.ErrorMessage,
	})
}

// UnmarshalJSON for custom json unmarshaling
func (v *FieldValidationDimension) UnmarshalJSON(data []byte) error {
	payload := map[string]interface{}{}
	if err := json.Unmarshal(data, &payload); err != nil {
		return err
	}

	dimensionData := payload["assetImageDimensions"].(map[string]interface{})

	if width, ok := dimensionData["width"].(map[string]interface{}); ok {
		v.Width = &MinMax{}

		if min, ok := width["min"].(float64); ok {
			v.Width.Min = min
		}

		if max, ok := width["max"].(float64); ok {
			v.Width.Max = max
		}
	}

	if height, ok := dimensionData["height"].(map[string]interface{}); ok {
		v.Height = &MinMax{}

		if min, ok := height["min"].(float64); ok {
			v.Height.Min = min
		}

		if max, ok := height["max"].(float64); ok {
			v.Height.Max = max
		}
	}

	if val, ok := payload["message"].(string); ok {
		v.ErrorMessage = val
	}

	return nil
}

// FieldValidationFileSize model
type FieldValidationFileSize struct {
	Size         *MinMax `json:"assetFileSize,omitempty"`
	ErrorMessage string  `json:"message,omitempty"`
}

// FieldValidationUnique model
type FieldValidationUnique struct {
	Unique bool `json:"unique"`
}

// FieldValidationPredefinedValues model
type FieldValidationPredefinedValues struct {
	In           []interface{} `json:"in,omitempty"`
	ErrorMessage string        `json:"message"`
}

// FieldValidationRange model
type FieldValidationRange struct {
	Range        *MinMax `json:"range,omitempty"`
	ErrorMessage string  `json:"message,omitempty"`
}

// FieldValidationDate model
type FieldValidationDate struct {
	Range        *DateMinMax `json:"dateRange,omitempty"`
	ErrorMessage string      `json:"message,omitempty"`
}

// MarshalJSON for custom json marshaling
func (v *FieldValidationDate) MarshalJSON() ([]byte, error) {
	type dateRange struct {
		Min string `json:"min,omitempty"`
		Max string `json:"max,omitempty"`
	}

	return json.Marshal(&struct {
		DateRange *dateRange `json:"dateRange,omitempty"`
		Message   string     `json:"message,omitempty"`
	}{
		DateRange: &dateRange{
			Min: v.Range.Min.Format("2006-01-02T03:04:05"),
			Max: v.Range.Max.Format("2006-01-02T03:04:05"),
		},
		Message: v.ErrorMessage,
	})
}

// UnmarshalJSON for custom json unmarshaling
func (v *FieldValidationDate) UnmarshalJSON(data []byte) error {
	payload := map[string]interface{}{}
	if err := json.Unmarshal(data, &payload); err != nil {
		return err
	}

	dateRangeData := payload["dateRange"].(map[string]interface{})
	v.Range = &DateMinMax{}

	if min, ok := dateRangeData["min"].(string); ok {
		minDate, err := time.Parse("2006-01-02T03:04:05", min)
		if err != nil {
			return err
		}
		v.Range.Min = minDate
	}

	if max, ok := dateRangeData["max"].(string); ok {
		maxDate, err := time.Parse("2006-01-02T03:04:05", max)
		if err != nil {
			return err
		}
		v.Range.Max = maxDate
	}

	if val, ok := payload["message"].(string); ok {
		v.ErrorMessage = val
	}

	return nil
}

// FieldValidationSize model
type FieldValidationSize struct {
	Size         *MinMax `json:"size,omitempty"`
	ErrorMessage string  `json:"message,omitempty"`
}

//noinspection GoUnusedConst
const (
	// FieldValidationRegexPatternEmail email validation
	FieldValidationRegexPatternEmail = `^\w[\w.-]*@([\w-]+\.)+[\w-]+$`
	// FieldValidationRegexPatternURL url validation
	FieldValidationRegexPatternURL = `^(ftp|http|https):\/\/(\w+:{0,1}\w*@)?(\S+)(:[0-9]+)?(\/|\/([\w#!:.?+=&%@!\-\/]))?$`
	// FieldValidationRegexPatternUSDate us date validation
	FieldValidationRegexPatternUSDate = `^(0?[1-9]|[12][0-9]|3[01])[- \/.](0?[1-9]|1[012])[- \/.](19|20)?\d\d$`
	// FieldValidationRegexPatternEuorpeanDate European date validation
	FieldValidationRegexPatternEuorpeanDate = `^(0?[1-9]|[12][0-9]|3[01])[- \/.](0?[1-9]|1[012])[- \/.](19|20)?\d\d$`
	// FieldValidationRegexPattern12HourTime 12-hour time validation
	FieldValidationRegexPattern12HourTime = `^(0?[1-9]|1[012]):[0-5][0-9](:[0-5][0-9])?\s*[aApP][mM]$`
	// FieldValidationRegexPattern24HourTime 24-hour time validation
	FieldValidationRegexPattern24HourTime = `^(0?[0-9]|1[0-9]|2[0-3]):[0-5][0-9](:[0-5][0-9])?$`
	// FieldValidationRegexPatternUSPhoneNumber us phone number validation
	FieldValidationRegexPatternUSPhoneNumber = `^\d[ -.]?\(?\d\d\d\)?[ -.]?\d\d\d[ -.]?\d\d\d\d$`
	// FieldValidationRegexPatternUSZipCode us zip code validation
	FieldValidationRegexPatternUSZipCode = `^\d{5}$|^\d{5}-\d{4}$`
)

// Regex model
type Regex struct {
	Pattern string `json:"pattern,omitempty"`
	Flags   string `json:"flags,omitempty"`
}

// FieldValidationRegex model
type FieldValidationRegex struct {
	Regex        *Regex `json:"regexp,omitempty"`
	ErrorMessage string `json:"message,omitempty"`
}
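Since the patterns above are plain regular expressions, they can be sanity-checked from any language. A small hedged cross-check of the email pattern, written in Python with made-up test strings:

import re

# FieldValidationRegexPatternEmail from the Go constants above
EMAIL = r"^\w[\w.-]*@([\w-]+\.)+[\w-]+$"

assert re.match(EMAIL, "user@example.com")
assert re.match(EMAIL, "first.last@sub.example.org")
assert not re.match(EMAIL, "not-an-email")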
import aiohttp


async def find_result_url(session, url, headers):
    """Return url[0] when its check URL url[1] answers HTTP 200, else ""."""
    url_result = ""
    try:
        async with session.get(url[1], headers=headers) as response:
            if response.status == 200:
                url_result = url[0]
    except (aiohttp.ClientResponseError, aiohttp.ClientConnectorError):
        # connection or HTTP errors simply yield an empty result
        pass
    return url_result
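# --- Hedged usage sketch (not part of the original module) --------------------
# Probing several (name, url) candidates concurrently; the endpoints are made up.
if __name__ == "__main__":
    import asyncio

    async def main():
        candidates = [
            ("example", "https://example.com/"),
            ("missing", "https://example.com/does-not-exist"),
        ]
        async with aiohttp.ClientSession() as session:
            results = await asyncio.gather(
                *(find_result_url(session, c, headers={}) for c in candidates))
        print([name for name in results if name])

    asyncio.run(main())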
Fabrication and piezoelectric properties of (K0.5Na0.5)NbO3-based ceramics doped with Bi-perovskites

The effect of the addition of Bi-perovskites to (Li0.04K0.52Na0.44)(Nb0.84Ta0.1Sb0.06)O3 (LF4) ceramics was studied to improve the properties of lead-free piezoelectrics. Ceramics with a relative density above 96% were successfully obtained by a solid state reaction process. The changes in lattice parameters indicated that the vacancy concentration at the A-site of the perovskite structure and the tetragonality of the crystal lattice were reduced with the addition of BiFeO3. The addition of BiFeO3 changed the LF4 to a soft piezoelectric. The electrical insulation of the LF4 was improved with the addition of BiFeO3, which made full poling possible under high electric fields. A few mol% of BiFeO3 was effective in improving the remanent polarization of the LF4 ceramics, and a piezoelectric d33 constant of about 358 pm/V was obtained in the 0.4 mol% doped ceramics.
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file implements wrappers to some of the functions in fqe.wick."""

from ctypes import c_int, c_double
from typing import List, Tuple, Optional

import numpy
from numpy.ctypeslib import ndpointer

from fqe.lib import lib_fqe


def _wickfill(target: numpy.ndarray,
              source: Optional[numpy.ndarray],
              indices: Optional[numpy.ndarray],
              factor: float,
              delta: Optional[numpy.ndarray]) -> numpy.ndarray:
    """This function is an internal utility that wraps a C-implementation to
    fill in custom RDMs using particle RDMs. The result of Wick's theorem is
    passed as lists (indices and delta) and a factor associated with it.
    The results are stored in target.

    Args:
        target (numpy.ndarray) - output array that stores reordered RDMs

        source (numpy.ndarray) - input array that stores one of the particle \
            RDMs

        indices (numpy.ndarray) - index mapping

        factor (float) - factor associated with this contribution

        delta (numpy.ndarray) - Kronecker deltas due to Wick's theorem

    Returns:
        target (numpy.ndarray) - Returns the output array. If target in the \
            input Args was not C-contiguous, this can be a new numpy object.
    """
    func = lib_fqe.wickfill
    func.argtypes = [
        ndpointer(dtype=numpy.complex128, flags=('C_CONTIGUOUS', 'ALIGNED')),
        ndpointer(dtype=numpy.complex128, flags=('C_CONTIGUOUS', 'ALIGNED')),
        ndpointer(dtype=numpy.uint32, flags=('C_CONTIGUOUS', 'ALIGNED')),
        c_double,
        ndpointer(dtype=numpy.uint32, flags=('C_CONTIGUOUS', 'ALIGNED')),
        c_int, c_int, c_int
    ]

    norb = target.shape[0]
    srank = len(source.shape) // 2 if source is not None else 0
    trank = len(target.shape) // 2

    if indices is None or len(indices) == 0:
        indices = numpy.zeros((1,), dtype=numpy.uint32)
    if delta is None or len(delta) == 0:
        delta = numpy.zeros((1,), dtype=numpy.uint32)
    if source is None or len(source) == 0:
        source = numpy.zeros((1,), dtype=numpy.complex128)

    # Fix up if target or source is not C-contiguous
    if not target.flags['C']:
        target = target.copy()
    if not source.flags['C']:
        source = source.copy()

    func(target, source, indices, factor, delta, norb, trank, srank)
    return target
package br.com.randomthings.exception;

import lombok.AllArgsConstructor;
import lombok.Data;

@Data
@AllArgsConstructor
public class StrategyValidation extends RuntimeException {

    StringBuilder errors;
}
Will Students Earnestly Attempt Learning Questions if Answers are Viewable?

Modern online learning materials may include built-in learning questions that are used for some of a class' homework points. To encourage learning, question solutions may be easily available to students. We sought to determine to what extent students earnestly attempt to answer learning questions when solutions are available via a simple button click. An earnest attempt means to try answering a question at least once before viewing the solution. We analyzed data from 550 students in four classes, at a four-year public research university, a four-year public teaching college, and two community colleges. We found average earnestness was a rather high 84%. We also found that 89% of students earnestly attempted 60%-100% of questions, with 73% earnestly attempting 80%-100%. Only 1% of students blatantly "cheat the system" by earnestly attempting less than 20% of questions. Thus, the heartening conclusion is that students will take advantage of a well-designed learning opportunity rather than just quickly earning points.

We noted that earnestness decreased as a course progressed, with analyses indicating the decrease was mostly due to tiredness or some other student factor, rather than increasing difficulty. We also found that analyzing per-question earnestness can help question authors find questions that need improvement.

In addition to providing the results of our earnestness analysis, this paper also describes the style by which the learning questions were made: how they complement text, teach rather than test, take only a small amount of time each, always include explanations, strive to occasionally "trick" students to explicitly dispel common misconceptions, avoid drill/kill and instead each teach a unique concept, create questions where alternative right answers are less likely to be marked wrong, and more. Via such design, students seem to discover that the questions are worth their time and effort, and thus most students earnestly try. We also discuss processes that can hurt earnestness, such as assigning excessive work. We describe our philosophy of not limiting student attempts for such learning questions, to create a safe learning environment (whereas other activities may indeed benefit from limits).
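The earnestness metric itself is straightforward to operationalize. Below is a minimal sketch of computing per-student earnestness from a clickstream log; the table layout and event names are hypothetical stand-ins, not the paper's actual data schema.

import pandas as pd

# hypothetical event log: one row per student action on a question
events = pd.DataFrame(
    [("s1", "q1", "attempt", 0), ("s1", "q1", "show_solution", 1),
     ("s1", "q2", "show_solution", 2),
     ("s2", "q1", "attempt", 0), ("s2", "q2", "attempt", 1)],
    columns=["student", "question", "action", "t"])

def earnest(group):
    """A question counts as earnest if its first action was an attempt."""
    return group.sort_values("t").iloc[0]["action"] == "attempt"

per_question = events.groupby(["student", "question"]).apply(earnest)
per_student = per_question.groupby(level="student").mean()
print(per_student)   # s1 -> 0.5, s2 -> 1.0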
Calculation of glacier elevation changes with SRTM: is there an elevation-dependent bias?

The freely available Shuttle Radar Topography Mission (SRTM3; 3 arcsec, or about 90 m, resolution) digital elevation model (DEM) provides the opportunity to calculate changes in glacier surface elevation, and thus ice mass, for a large sample of glaciers at the same time. Depending on the availability and quality of an earlier DEM, overall changes for a period of a few decades can be calculated. Such changes have already been computed for several mountain ranges, and in all cases a massive thinning (down-wasting) of the lower parts of large and flat glacier tongues has been shown (e.g. Rignot and others, 2003; Surazakov and Aizen, 2006; Larsen and others, 2007; Schiefer and others, 2007). Moreover, all the studies point to an acceleration of glacier wastage in the past decade, and the contribution of glacier melt to global sea-level rise has been frequently revised upwards.

In this respect, some of the studies (not all) corrected the SRTM3 DEM at higher elevations for a systematic bias that had been reported by Berthier and others (2006) for stable non-glacier terrain. With this elevation-dependent correction, the elevation change in the accumulation area of glaciers is reduced, which leads to a smaller estimate of the total volume change (cf. Schiefer and others, 2007). However, other recent studies have not found such a bias (Larsen and others, 2007) or have even applied a correction in the opposite direction (Möller and others, 2007). As the DEMs used for comparison with the SRTM3 DEM largely differ in quality, it is difficult to say whether SRTM elevations should be corrected or not.

In a new study (Paul and Haeberli, in press), elevation changes have been calculated for the glaciers in the Swiss Alps by differencing the SRTM3 DEM (resampled to 25 m resolution) from a swisstopo DEM from about 1985 with the same resolution. A similar bias to that described by Berthier and others (2006) was found for non-glacier terrain. In order to assess whether this trend lies only within the SRTM3 DEM (and must thus also be corrected over glacier areas), the influence of spatial resolution on DEM elevations has been assessed with an experiment based solely on the swisstopo DEM, that is, not influenced by problems resulting from comparing DEMs of different sources.

The 25 m cells of the swisstopo DEM were averaged (mean value) to 100 m cells and resampled (bilinear) back to 25 m cells. Then the original DEM was subtracted from the resampled DEM. Finally, mean elevation differences were calculated in 100 m bins over glacier and non-glacier terrain (Fig. 1). Over non-glacier terrain, mean elevation differences are small below 2500 m, but at higher elevations the two DEMs increasingly deviate, by about -4 m at 3000 m a.s.l. and -8 m at 4000 m a.s.l. This is similar to the correction factors suggested by Berthier and others (2006) and applied in some other studies (Surazakov and Aizen, 2006; Schiefer and others, 2007). Based on this resolution-dependent bias, one might consider subtracting the SRTM3 DEM from the averaged 100 m swisstopo DEM. A quick analysis reveals (not shown here) that the elevation differences still exist (although they are smaller), because the generation of the swisstopo DEM also included additional vector datasets (e.g. breaklines, rivers, lake contours, spot elevations), which create a more pointed DEM (with steeper slopes) than would result without such data.

Over glacier surfaces only, there is also a trend with elevation (Fig. 1), but only above 3000 m, and up to 3500 m the differences are slightly positive. The reason for the systematic trend (or bias) over non-glacier terrain is that steep ridges and mountain crests become more frequent towards higher elevations, and the elevation of these structures is underestimated in the coarser-resolution DEM. On the other hand, steep terrain at lower elevations is often related to gorges or conduits, where elevations are overestimated in the coarser-resolution DEM. Hence, slope alone is not a unique indicator of the elevation difference, as plan curvature (convex or concave) determines the sign of the difference. When the sign is neglected, elevation differences increase exponentially with mean slope. For glacier surfaces, mean slope increases with elevation (due to steeper regions towards the bergschrund) and elevation differences reach
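The resampling experiment described above can be reproduced in outline with a few lines of numpy/scipy. The sketch below uses a synthetic random surface, so it only demonstrates the block-mean, bilinear-resample, difference-and-bin pipeline; the ridge-induced bias itself emerges only with real topography.

import numpy as np
from scipy.ndimage import zoom

rng = np.random.default_rng(1)
dem25 = 2000 + 1500 * rng.random((400, 400))      # stand-in 25 m DEM

# average 4x4 blocks of 25 m cells into 100 m cells (mean value)
dem100 = dem25.reshape(100, 4, 100, 4).mean(axis=(1, 3))

# bilinear resampling back to 25 m, then difference against the original
dem25_back = zoom(dem100, 4, order=1)
diff = dem25_back - dem25

# mean elevation difference in 100 m elevation bins
bins = np.arange(2000, 3600, 100)
idx = np.digitize(dem25.ravel(), bins)
for i, lo in enumerate(bins, start=1):
    sel = idx == i
    if sel.any():
        print(lo, round(float(diff.ravel()[sel].mean()), 2))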
<reponame>SFIP/SFIP // SPDX-License-Identifier: GPL-2.0 /* * Write ahead logging implementation copyright <NAME> 2000 * * The background commits make this code very interrelated, and * overly complex. I need to rethink things a bit....The major players: * * journal_begin -- call with the number of blocks you expect to log. * If the current transaction is too * old, it will block until the current transaction is * finished, and then start a new one. * Usually, your transaction will get joined in with * previous ones for speed. * * journal_join -- same as journal_begin, but won't block on the current * transaction regardless of age. Don't ever call * this. Ever. There are only two places it should be * called from, and they are both inside this file. * * journal_mark_dirty -- adds blocks into this transaction. clears any flags * that might make them get sent to disk * and then marks them BH_JDirty. Puts the buffer head * into the current transaction hash. * * journal_end -- if the current transaction is batchable, it does nothing * otherwise, it could do an async/synchronous commit, or * a full flush of all log and real blocks in the * transaction. * * flush_old_commits -- if the current transaction is too old, it is ended and * commit blocks are sent to disk. Forces commit blocks * to disk for all backgrounded commits that have been * around too long. * -- Note, if you call this as an immediate flush from * within kupdate, it will ignore the immediate flag */ #include <linux/time.h> #include <linux/semaphore.h> #include <linux/vmalloc.h> #include "reiserfs.h" #include <linux/kernel.h> #include <linux/errno.h> #include <linux/fcntl.h> #include <linux/stat.h> #include <linux/string.h> #include <linux/buffer_head.h> #include <linux/workqueue.h> #include <linux/writeback.h> #include <linux/blkdev.h> #include <linux/backing-dev.h> #include <linux/uaccess.h> #include <linux/slab.h> /* gets a struct reiserfs_journal_list * from a list head */ #define JOURNAL_LIST_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \ j_list)) /* must be correct to keep the desc and commit structs at 4k */ #define JOURNAL_TRANS_HALF 1018 #define BUFNR 64 /*read ahead */ /* cnode stat bits. Move these into reiserfs_fs.h */ /* this block was freed, and can't be written. 
*/ #define BLOCK_FREED 2 /* this block was freed during this transaction, and can't be written */ #define BLOCK_FREED_HOLDER 3 /* used in flush_journal_list */ #define BLOCK_NEEDS_FLUSH 4 #define BLOCK_DIRTIED 5 /* journal list state bits */ #define LIST_TOUCHED 1 #define LIST_DIRTY 2 #define LIST_COMMIT_PENDING 4 /* someone will commit this list */ /* flags for do_journal_end */ #define FLUSH_ALL 1 /* flush commit and real blocks */ #define COMMIT_NOW 2 /* end and commit this transaction */ #define WAIT 4 /* wait for the log blocks to hit the disk */ static int do_journal_end(struct reiserfs_transaction_handle *, int flags); static int flush_journal_list(struct super_block *s, struct reiserfs_journal_list *jl, int flushall); static int flush_commit_list(struct super_block *s, struct reiserfs_journal_list *jl, int flushall); static int can_dirty(struct reiserfs_journal_cnode *cn); static int journal_join(struct reiserfs_transaction_handle *th, struct super_block *sb); static void release_journal_dev(struct super_block *super, struct reiserfs_journal *journal); static void dirty_one_transaction(struct super_block *s, struct reiserfs_journal_list *jl); static void flush_async_commits(struct work_struct *work); static void queue_log_writer(struct super_block *s); /* values for join in do_journal_begin_r */ enum { JBEGIN_REG = 0, /* regular journal begin */ /* join the running transaction if at all possible */ JBEGIN_JOIN = 1, /* called from cleanup code, ignores aborted flag */ JBEGIN_ABORT = 2, }; static int do_journal_begin_r(struct reiserfs_transaction_handle *th, struct super_block *sb, unsigned long nblocks, int join); static void init_journal_hash(struct super_block *sb) { struct reiserfs_journal *journal = SB_JOURNAL(sb); memset(journal->j_hash_table, 0, JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *)); } /* * clears BH_Dirty and sticks the buffer on the clean list. Called because * I can't allow refile_buffer to make schedule happen after I've freed a * block. Look at remove_from_transaction and journal_mark_freed for * more details. 
*/ static int reiserfs_clean_and_file_buffer(struct buffer_head *bh) { if (bh) { clear_buffer_dirty(bh); clear_buffer_journal_test(bh); } return 0; } static struct reiserfs_bitmap_node *allocate_bitmap_node(struct super_block *sb) { struct reiserfs_bitmap_node *bn; static int id; bn = kmalloc(sizeof(struct reiserfs_bitmap_node), GFP_NOFS); if (!bn) { return NULL; } bn->data = kzalloc(sb->s_blocksize, GFP_NOFS); if (!bn->data) { kfree(bn); return NULL; } bn->id = id++; INIT_LIST_HEAD(&bn->list); return bn; } static struct reiserfs_bitmap_node *get_bitmap_node(struct super_block *sb) { struct reiserfs_journal *journal = SB_JOURNAL(sb); struct reiserfs_bitmap_node *bn = NULL; struct list_head *entry = journal->j_bitmap_nodes.next; journal->j_used_bitmap_nodes++; repeat: if (entry != &journal->j_bitmap_nodes) { bn = list_entry(entry, struct reiserfs_bitmap_node, list); list_del(entry); memset(bn->data, 0, sb->s_blocksize); journal->j_free_bitmap_nodes--; return bn; } bn = allocate_bitmap_node(sb); if (!bn) { yield(); goto repeat; } return bn; } static inline void free_bitmap_node(struct super_block *sb, struct reiserfs_bitmap_node *bn) { struct reiserfs_journal *journal = SB_JOURNAL(sb); journal->j_used_bitmap_nodes--; if (journal->j_free_bitmap_nodes > REISERFS_MAX_BITMAP_NODES) { kfree(bn->data); kfree(bn); } else { list_add(&bn->list, &journal->j_bitmap_nodes); journal->j_free_bitmap_nodes++; } } static void allocate_bitmap_nodes(struct super_block *sb) { int i; struct reiserfs_journal *journal = SB_JOURNAL(sb); struct reiserfs_bitmap_node *bn = NULL; for (i = 0; i < REISERFS_MIN_BITMAP_NODES; i++) { bn = allocate_bitmap_node(sb); if (bn) { list_add(&bn->list, &journal->j_bitmap_nodes); journal->j_free_bitmap_nodes++; } else { /* this is ok, we'll try again when more are needed */ break; } } } static int set_bit_in_list_bitmap(struct super_block *sb, b_blocknr_t block, struct reiserfs_list_bitmap *jb) { unsigned int bmap_nr = block / (sb->s_blocksize << 3); unsigned int bit_nr = block % (sb->s_blocksize << 3); if (!jb->bitmaps[bmap_nr]) { jb->bitmaps[bmap_nr] = get_bitmap_node(sb); } set_bit(bit_nr, (unsigned long *)jb->bitmaps[bmap_nr]->data); return 0; } static void cleanup_bitmap_list(struct super_block *sb, struct reiserfs_list_bitmap *jb) { int i; if (jb->bitmaps == NULL) return; for (i = 0; i < reiserfs_bmap_count(sb); i++) { if (jb->bitmaps[i]) { free_bitmap_node(sb, jb->bitmaps[i]); jb->bitmaps[i] = NULL; } } } /* * only call this on FS unmount. */ static int free_list_bitmaps(struct super_block *sb, struct reiserfs_list_bitmap *jb_array) { int i; struct reiserfs_list_bitmap *jb; for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) { jb = jb_array + i; jb->journal_list = NULL; cleanup_bitmap_list(sb, jb); vfree(jb->bitmaps); jb->bitmaps = NULL; } return 0; } static int free_bitmap_nodes(struct super_block *sb) { struct reiserfs_journal *journal = SB_JOURNAL(sb); struct list_head *next = journal->j_bitmap_nodes.next; struct reiserfs_bitmap_node *bn; while (next != &journal->j_bitmap_nodes) { bn = list_entry(next, struct reiserfs_bitmap_node, list); list_del(next); kfree(bn->data); kfree(bn); next = journal->j_bitmap_nodes.next; journal->j_free_bitmap_nodes--; } return 0; } /* * get memory for JOURNAL_NUM_BITMAPS worth of bitmaps. * jb_array is the array to be filled in. 
*/ int reiserfs_allocate_list_bitmaps(struct super_block *sb, struct reiserfs_list_bitmap *jb_array, unsigned int bmap_nr) { int i; int failed = 0; struct reiserfs_list_bitmap *jb; int mem = bmap_nr * sizeof(struct reiserfs_bitmap_node *); for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) { jb = jb_array + i; jb->journal_list = NULL; jb->bitmaps = vzalloc(mem); if (!jb->bitmaps) { reiserfs_warning(sb, "clm-2000", "unable to " "allocate bitmaps for journal lists"); failed = 1; break; } } if (failed) { free_list_bitmaps(sb, jb_array); return -1; } return 0; } /* * find an available list bitmap. If you can't find one, flush a commit list * and try again */ static struct reiserfs_list_bitmap *get_list_bitmap(struct super_block *sb, struct reiserfs_journal_list *jl) { int i, j; struct reiserfs_journal *journal = SB_JOURNAL(sb); struct reiserfs_list_bitmap *jb = NULL; for (j = 0; j < (JOURNAL_NUM_BITMAPS * 3); j++) { i = journal->j_list_bitmap_index; journal->j_list_bitmap_index = (i + 1) % JOURNAL_NUM_BITMAPS; jb = journal->j_list_bitmap + i; if (journal->j_list_bitmap[i].journal_list) { flush_commit_list(sb, journal->j_list_bitmap[i]. journal_list, 1); if (!journal->j_list_bitmap[i].journal_list) { break; } } else { break; } } /* double check to make sure if flushed correctly */ if (jb->journal_list) return NULL; jb->journal_list = jl; return jb; } /* * allocates a new chunk of X nodes, and links them all together as a list. * Uses the cnode->next and cnode->prev pointers * returns NULL on failure */ static struct reiserfs_journal_cnode *allocate_cnodes(int num_cnodes) { struct reiserfs_journal_cnode *head; int i; if (num_cnodes <= 0) { return NULL; } head = vzalloc(array_size(num_cnodes, sizeof(struct reiserfs_journal_cnode))); if (!head) { return NULL; } head[0].prev = NULL; head[0].next = head + 1; for (i = 1; i < num_cnodes; i++) { head[i].prev = head + (i - 1); head[i].next = head + (i + 1); /* if last one, overwrite it after the if */ } head[num_cnodes - 1].next = NULL; return head; } /* pulls a cnode off the free list, or returns NULL on failure */ static struct reiserfs_journal_cnode *get_cnode(struct super_block *sb) { struct reiserfs_journal_cnode *cn; struct reiserfs_journal *journal = SB_JOURNAL(sb); reiserfs_check_lock_depth(sb, "get_cnode"); if (journal->j_cnode_free <= 0) { return NULL; } journal->j_cnode_used++; journal->j_cnode_free--; cn = journal->j_cnode_free_list; if (!cn) { return cn; } if (cn->next) { cn->next->prev = NULL; } journal->j_cnode_free_list = cn->next; memset(cn, 0, sizeof(struct reiserfs_journal_cnode)); return cn; } /* * returns a cnode to the free list */ static void free_cnode(struct super_block *sb, struct reiserfs_journal_cnode *cn) { struct reiserfs_journal *journal = SB_JOURNAL(sb); reiserfs_check_lock_depth(sb, "free_cnode"); journal->j_cnode_used--; journal->j_cnode_free++; /* memset(cn, 0, sizeof(struct reiserfs_journal_cnode)) ; */ cn->next = journal->j_cnode_free_list; if (journal->j_cnode_free_list) { journal->j_cnode_free_list->prev = cn; } cn->prev = NULL; /* not needed with the memset, but I might kill the memset, and forget to do this */ journal->j_cnode_free_list = cn; } static void clear_prepared_bits(struct buffer_head *bh) { clear_buffer_journal_prepared(bh); clear_buffer_journal_restore_dirty(bh); } /* * return a cnode with same dev, block number and size in table, * or null if not found */ static inline struct reiserfs_journal_cnode *get_journal_hash_dev(struct super_block *sb, struct reiserfs_journal_cnode **table, long bl) { struct 
reiserfs_journal_cnode *cn; cn = journal_hash(table, sb, bl); while (cn) { if (cn->blocknr == bl && cn->sb == sb) return cn; cn = cn->hnext; } return (struct reiserfs_journal_cnode *)0; } /* * this actually means 'can this block be reallocated yet?'. If you set * search_all, a block can only be allocated if it is not in the current * transaction, was not freed by the current transaction, and has no chance * of ever being overwritten by a replay after crashing. * * If you don't set search_all, a block can only be allocated if it is not * in the current transaction. Since deleting a block removes it from the * current transaction, this case should never happen. If you don't set * search_all, make sure you never write the block without logging it. * * next_zero_bit is a suggestion about the next block to try for find_forward. * when bl is rejected because it is set in a journal list bitmap, we search * for the next zero bit in the bitmap that rejected bl. Then, we return * that through next_zero_bit for find_forward to try. * * Just because we return something in next_zero_bit does not mean we won't * reject it on the next call to reiserfs_in_journal */ int reiserfs_in_journal(struct super_block *sb, unsigned int bmap_nr, int bit_nr, int search_all, b_blocknr_t * next_zero_bit) { struct reiserfs_journal *journal = SB_JOURNAL(sb); struct reiserfs_list_bitmap *jb; int i; unsigned long bl; *next_zero_bit = 0; /* always start this at zero. */ PROC_INFO_INC(sb, journal.in_journal); /* * If we aren't doing a search_all, this is a metablock, and it * will be logged before use. if we crash before the transaction * that freed it commits, this transaction won't have committed * either, and the block will never be written */ if (search_all) { for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) { PROC_INFO_INC(sb, journal.in_journal_bitmap); jb = journal->j_list_bitmap + i; if (jb->journal_list && jb->bitmaps[bmap_nr] && test_bit(bit_nr, (unsigned long *)jb->bitmaps[bmap_nr]-> data)) { *next_zero_bit = find_next_zero_bit((unsigned long *) (jb->bitmaps[bmap_nr]-> data), sb->s_blocksize << 3, bit_nr + 1); return 1; } } } bl = bmap_nr * (sb->s_blocksize << 3) + bit_nr; /* is it in any old transactions? */ if (search_all && (get_journal_hash_dev(sb, journal->j_list_hash_table, bl))) { return 1; } /* is it in the current transaction. 
This should never happen */ if ((get_journal_hash_dev(sb, journal->j_hash_table, bl))) { BUG(); return 1; } PROC_INFO_INC(sb, journal.in_journal_reusable); /* safe for reuse */ return 0; } /* insert cn into table */ static inline void insert_journal_hash(struct reiserfs_journal_cnode **table, struct reiserfs_journal_cnode *cn) { struct reiserfs_journal_cnode *cn_orig; cn_orig = journal_hash(table, cn->sb, cn->blocknr); cn->hnext = cn_orig; cn->hprev = NULL; if (cn_orig) { cn_orig->hprev = cn; } journal_hash(table, cn->sb, cn->blocknr) = cn; } /* lock the current transaction */ static inline void lock_journal(struct super_block *sb) { PROC_INFO_INC(sb, journal.lock_journal); reiserfs_mutex_lock_safe(&SB_JOURNAL(sb)->j_mutex, sb); } /* unlock the current transaction */ static inline void unlock_journal(struct super_block *sb) { mutex_unlock(&SB_JOURNAL(sb)->j_mutex); } static inline void get_journal_list(struct reiserfs_journal_list *jl) { jl->j_refcount++; } static inline void put_journal_list(struct super_block *s, struct reiserfs_journal_list *jl) { if (jl->j_refcount < 1) { reiserfs_panic(s, "journal-2", "trans id %u, refcount at %d", jl->j_trans_id, jl->j_refcount); } if (--jl->j_refcount == 0) kfree(jl); } /* * this used to be much more involved, and I'm keeping it just in case * things get ugly again. it gets called by flush_commit_list, and * cleans up any data stored about blocks freed during a transaction. */ static void cleanup_freed_for_journal_list(struct super_block *sb, struct reiserfs_journal_list *jl) { struct reiserfs_list_bitmap *jb = jl->j_list_bitmap; if (jb) { cleanup_bitmap_list(sb, jb); } jl->j_list_bitmap->journal_list = NULL; jl->j_list_bitmap = NULL; } static int journal_list_still_alive(struct super_block *s, unsigned int trans_id) { struct reiserfs_journal *journal = SB_JOURNAL(s); struct list_head *entry = &journal->j_journal_list; struct reiserfs_journal_list *jl; if (!list_empty(entry)) { jl = JOURNAL_LIST_ENTRY(entry->next); if (jl->j_trans_id <= trans_id) { return 1; } } return 0; } /* * If page->mapping was null, we failed to truncate this page for * some reason. Most likely because it was truncated after being * logged via data=journal. * * This does a check to see if the buffer belongs to one of these * lost pages before doing the final put_bh. If page->mapping was * null, it tries to free buffers on the page, which should make the * final put_page drop the page from the lru. 
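* If the trylock fails, the page is busy elsewhere; in that case we only
* drop our buffer reference and leave the page alone.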
*/ static void release_buffer_page(struct buffer_head *bh) { struct page *page = bh->b_page; if (!page->mapping && trylock_page(page)) { get_page(page); put_bh(bh); if (!page->mapping) try_to_free_buffers(page); unlock_page(page); put_page(page); } else { put_bh(bh); } } static void reiserfs_end_buffer_io_sync(struct buffer_head *bh, int uptodate) { if (buffer_journaled(bh)) { reiserfs_warning(NULL, "clm-2084", "pinned buffer %lu:%pg sent to disk", bh->b_blocknr, bh->b_bdev); } if (uptodate) set_buffer_uptodate(bh); else clear_buffer_uptodate(bh); unlock_buffer(bh); release_buffer_page(bh); } static void reiserfs_end_ordered_io(struct buffer_head *bh, int uptodate) { if (uptodate) set_buffer_uptodate(bh); else clear_buffer_uptodate(bh); unlock_buffer(bh); put_bh(bh); } static void submit_logged_buffer(struct buffer_head *bh) { get_bh(bh); bh->b_end_io = reiserfs_end_buffer_io_sync; clear_buffer_journal_new(bh); clear_buffer_dirty(bh); if (!test_clear_buffer_journal_test(bh)) BUG(); if (!buffer_uptodate(bh)) BUG(); submit_bh(REQ_OP_WRITE, 0, bh); } static void submit_ordered_buffer(struct buffer_head *bh) { get_bh(bh); bh->b_end_io = reiserfs_end_ordered_io; clear_buffer_dirty(bh); if (!buffer_uptodate(bh)) BUG(); submit_bh(REQ_OP_WRITE, 0, bh); } #define CHUNK_SIZE 32 struct buffer_chunk { struct buffer_head *bh[CHUNK_SIZE]; int nr; }; static void write_chunk(struct buffer_chunk *chunk) { int i; for (i = 0; i < chunk->nr; i++) { submit_logged_buffer(chunk->bh[i]); } chunk->nr = 0; } static void write_ordered_chunk(struct buffer_chunk *chunk) { int i; for (i = 0; i < chunk->nr; i++) { submit_ordered_buffer(chunk->bh[i]); } chunk->nr = 0; } static int add_to_chunk(struct buffer_chunk *chunk, struct buffer_head *bh, spinlock_t * lock, void (fn) (struct buffer_chunk *)) { int ret = 0; BUG_ON(chunk->nr >= CHUNK_SIZE); chunk->bh[chunk->nr++] = bh; if (chunk->nr >= CHUNK_SIZE) { ret = 1; if (lock) { spin_unlock(lock); fn(chunk); spin_lock(lock); } else { fn(chunk); } } return ret; } static atomic_t nr_reiserfs_jh = ATOMIC_INIT(0); static struct reiserfs_jh *alloc_jh(void) { struct reiserfs_jh *jh; while (1) { jh = kmalloc(sizeof(*jh), GFP_NOFS); if (jh) { atomic_inc(&nr_reiserfs_jh); return jh; } yield(); } } /* * we want to free the jh when the buffer has been written * and waited on */ void reiserfs_free_jh(struct buffer_head *bh) { struct reiserfs_jh *jh; jh = bh->b_private; if (jh) { bh->b_private = NULL; jh->bh = NULL; list_del_init(&jh->list); kfree(jh); if (atomic_read(&nr_reiserfs_jh) <= 0) BUG(); atomic_dec(&nr_reiserfs_jh); put_bh(bh); } } static inline int __add_jh(struct reiserfs_journal *j, struct buffer_head *bh, int tail) { struct reiserfs_jh *jh; if (bh->b_private) { spin_lock(&j->j_dirty_buffers_lock); if (!bh->b_private) { spin_unlock(&j->j_dirty_buffers_lock); goto no_jh; } jh = bh->b_private; list_del_init(&jh->list); } else { no_jh: get_bh(bh); jh = alloc_jh(); spin_lock(&j->j_dirty_buffers_lock); /* * buffer must be locked for __add_jh, should be able to have * two adds at the same time */ BUG_ON(bh->b_private); jh->bh = bh; bh->b_private = jh; } jh->jl = j->j_current_jl; if (tail) list_add_tail(&jh->list, &jh->jl->j_tail_bh_list); else { list_add_tail(&jh->list, &jh->jl->j_bh_list); } spin_unlock(&j->j_dirty_buffers_lock); return 0; } int reiserfs_add_tail_list(struct inode *inode, struct buffer_head *bh) { return __add_jh(SB_JOURNAL(inode->i_sb), bh, 1); } int reiserfs_add_ordered_list(struct inode *inode, struct buffer_head *bh) { return __add_jh(SB_JOURNAL(inode->i_sb), 
bh, 0); } #define JH_ENTRY(l) list_entry((l), struct reiserfs_jh, list) static int write_ordered_buffers(spinlock_t * lock, struct reiserfs_journal *j, struct reiserfs_journal_list *jl, struct list_head *list) { struct buffer_head *bh; struct reiserfs_jh *jh; int ret = j->j_errno; struct buffer_chunk chunk; struct list_head tmp; INIT_LIST_HEAD(&tmp); chunk.nr = 0; spin_lock(lock); while (!list_empty(list)) { jh = JH_ENTRY(list->next); bh = jh->bh; get_bh(bh); if (!trylock_buffer(bh)) { if (!buffer_dirty(bh)) { list_move(&jh->list, &tmp); goto loop_next; } spin_unlock(lock); if (chunk.nr) write_ordered_chunk(&chunk); wait_on_buffer(bh); cond_resched(); spin_lock(lock); goto loop_next; } /* * in theory, dirty non-uptodate buffers should never get here, * but the upper layer io error paths still have a few quirks. * Handle them here as gracefully as we can */ if (!buffer_uptodate(bh) && buffer_dirty(bh)) { clear_buffer_dirty(bh); ret = -EIO; } if (buffer_dirty(bh)) { list_move(&jh->list, &tmp); add_to_chunk(&chunk, bh, lock, write_ordered_chunk); } else { reiserfs_free_jh(bh); unlock_buffer(bh); } loop_next: put_bh(bh); cond_resched_lock(lock); } if (chunk.nr) { spin_unlock(lock); write_ordered_chunk(&chunk); spin_lock(lock); } while (!list_empty(&tmp)) { jh = JH_ENTRY(tmp.prev); bh = jh->bh; get_bh(bh); reiserfs_free_jh(bh); if (buffer_locked(bh)) { spin_unlock(lock); wait_on_buffer(bh); spin_lock(lock); } if (!buffer_uptodate(bh)) { ret = -EIO; } /* * ugly interaction with invalidatepage here. * reiserfs_invalidate_page will pin any buffer that has a * valid journal head from an older transaction. If someone * else sets our buffer dirty after we write it in the first * loop, and then someone truncates the page away, nobody * will ever write the buffer. We're safe if we write the * page one last time after freeing the journal header. */ if (buffer_dirty(bh) && unlikely(bh->b_page->mapping == NULL)) { spin_unlock(lock); ll_rw_block(REQ_OP_WRITE, 0, 1, &bh); spin_lock(lock); } put_bh(bh); cond_resched_lock(lock); } spin_unlock(lock); return ret; } static int flush_older_commits(struct super_block *s, struct reiserfs_journal_list *jl) { struct reiserfs_journal *journal = SB_JOURNAL(s); struct reiserfs_journal_list *other_jl; struct reiserfs_journal_list *first_jl; struct list_head *entry; unsigned int trans_id = jl->j_trans_id; unsigned int other_trans_id; find_first: /* * first we walk backwards to find the oldest uncommitted transaction */ first_jl = jl; entry = jl->j_list.prev; while (1) { other_jl = JOURNAL_LIST_ENTRY(entry); if (entry == &journal->j_journal_list || atomic_read(&other_jl->j_older_commits_done)) break; first_jl = other_jl; entry = other_jl->j_list.prev; } /* if we didn't find any older uncommitted transactions, return now */ if (first_jl == jl) { return 0; } entry = &first_jl->j_list; while (1) { other_jl = JOURNAL_LIST_ENTRY(entry); other_trans_id = other_jl->j_trans_id; if (other_trans_id < trans_id) { if (atomic_read(&other_jl->j_commit_left) != 0) { flush_commit_list(s, other_jl, 0); /* list we were called with is gone, return */ if (!journal_list_still_alive(s, trans_id)) return 1; /* * the one we just flushed is gone, this means * all older lists are also gone, so first_jl * is no longer valid either. Go back to the * beginning. 
*/ if (!journal_list_still_alive (s, other_trans_id)) { goto find_first; } } entry = entry->next; if (entry == &journal->j_journal_list) return 0; } else { return 0; } } return 0; } static int reiserfs_async_progress_wait(struct super_block *s) { struct reiserfs_journal *j = SB_JOURNAL(s); if (atomic_read(&j->j_async_throttle)) { int depth; depth = reiserfs_write_unlock_nested(s); congestion_wait(BLK_RW_ASYNC, HZ / 10); reiserfs_write_lock_nested(s, depth); } return 0; } /* * if this journal list still has commit blocks unflushed, send them to disk. * * log areas must be flushed in order (transaction 2 can't commit before * transaction 1) Before the commit block can be written, every other log * block must be safely on disk */ static int flush_commit_list(struct super_block *s, struct reiserfs_journal_list *jl, int flushall) { int i; b_blocknr_t bn; struct buffer_head *tbh = NULL; unsigned int trans_id = jl->j_trans_id; struct reiserfs_journal *journal = SB_JOURNAL(s); int retval = 0; int write_len; int depth; reiserfs_check_lock_depth(s, "flush_commit_list"); if (atomic_read(&jl->j_older_commits_done)) { return 0; } /* * before we can put our commit blocks on disk, we have to make * sure everyone older than us is on disk too */ BUG_ON(jl->j_len <= 0); BUG_ON(trans_id == journal->j_trans_id); get_journal_list(jl); if (flushall) { if (flush_older_commits(s, jl) == 1) { /* * list disappeared during flush_older_commits. * return */ goto put_jl; } } /* make sure nobody is trying to flush this one at the same time */ reiserfs_mutex_lock_safe(&jl->j_commit_mutex, s); if (!journal_list_still_alive(s, trans_id)) { mutex_unlock(&jl->j_commit_mutex); goto put_jl; } BUG_ON(jl->j_trans_id == 0); /* this commit is done, exit */ if (atomic_read(&jl->j_commit_left) <= 0) { if (flushall) { atomic_set(&jl->j_older_commits_done, 1); } mutex_unlock(&jl->j_commit_mutex); goto put_jl; } if (!list_empty(&jl->j_bh_list)) { int ret; /* * We might sleep in numerous places inside * write_ordered_buffers. Relax the write lock. */ depth = reiserfs_write_unlock_nested(s); ret = write_ordered_buffers(&journal->j_dirty_buffers_lock, journal, jl, &jl->j_bh_list); if (ret < 0 && retval == 0) retval = ret; reiserfs_write_lock_nested(s, depth); } BUG_ON(!list_empty(&jl->j_bh_list)); /* * for the description block and all the log blocks, submit any buffers * that haven't already reached the disk. Try to write at least 256 * log blocks. later on, we will only wait on blocks that correspond * to this transaction, but while we're unplugging we might as well * get a chunk of data on there. */ atomic_inc(&journal->j_async_throttle); write_len = jl->j_len + 1; if (write_len < 256) write_len = 256; for (i = 0; i < write_len; i++) { bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) + (jl->j_start + i) % SB_ONDISK_JOURNAL_SIZE(s); tbh = journal_find_get_block(s, bn); if (tbh) { if (buffer_dirty(tbh)) { depth = reiserfs_write_unlock_nested(s); ll_rw_block(REQ_OP_WRITE, 0, 1, &tbh); reiserfs_write_lock_nested(s, depth); } put_bh(tbh); } } atomic_dec(&journal->j_async_throttle); for (i = 0; i < (jl->j_len + 1); i++) { bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) + (jl->j_start + i) % SB_ONDISK_JOURNAL_SIZE(s); tbh = journal_find_get_block(s, bn); depth = reiserfs_write_unlock_nested(s); __wait_on_buffer(tbh); reiserfs_write_lock_nested(s, depth); /* * since we're using ll_rw_block above, it might have skipped * over a locked buffer. 
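* __wait_on_buffer() above only waits on I/O that was actually submitted;
* a buffer that ll_rw_block() skipped can still be dirty at this point.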
Double check here */ /* redundant, sync_dirty_buffer() checks */ if (buffer_dirty(tbh)) { depth = reiserfs_write_unlock_nested(s); sync_dirty_buffer(tbh); reiserfs_write_lock_nested(s, depth); } if (unlikely(!buffer_uptodate(tbh))) { #ifdef CONFIG_REISERFS_CHECK reiserfs_warning(s, "journal-601", "buffer write failed"); #endif retval = -EIO; } /* once for journal_find_get_block */ put_bh(tbh); /* once due to original getblk in do_journal_end */ put_bh(tbh); atomic_dec(&jl->j_commit_left); } BUG_ON(atomic_read(&jl->j_commit_left) != 1); /* * If there was a write error in the journal - we can't commit * this transaction - it will be invalid and, if successful, * will just end up propagating the write error out to * the file system. */ if (likely(!retval && !reiserfs_is_journal_aborted(journal))) { if (buffer_dirty(jl->j_commit_bh)) BUG(); mark_buffer_dirty(jl->j_commit_bh); depth = reiserfs_write_unlock_nested(s); if (reiserfs_barrier_flush(s)) __sync_dirty_buffer(jl->j_commit_bh, REQ_SYNC | REQ_PREFLUSH | REQ_FUA); else sync_dirty_buffer(jl->j_commit_bh); reiserfs_write_lock_nested(s, depth); } if (unlikely(!buffer_uptodate(jl->j_commit_bh))) { #ifdef CONFIG_REISERFS_CHECK reiserfs_warning(s, "journal-615", "buffer write failed"); #endif retval = -EIO; } bforget(jl->j_commit_bh); if (journal->j_last_commit_id != 0 && (jl->j_trans_id - journal->j_last_commit_id) != 1) { reiserfs_warning(s, "clm-2200", "last commit %lu, current %lu", journal->j_last_commit_id, jl->j_trans_id); } journal->j_last_commit_id = jl->j_trans_id; /* * now, every commit block is on the disk. It is safe to allow * blocks freed during this transaction to be reallocated */ cleanup_freed_for_journal_list(s, jl); retval = retval ? retval : journal->j_errno; /* mark the metadata dirty */ if (!retval) dirty_one_transaction(s, jl); atomic_dec(&jl->j_commit_left); if (flushall) { atomic_set(&jl->j_older_commits_done, 1); } mutex_unlock(&jl->j_commit_mutex); put_jl: put_journal_list(s, jl); if (retval) reiserfs_abort(s, retval, "Journal write error in %s", __func__); return retval; } /* * flush_journal_list frequently needs to find a newer transaction for a * given block. This does that, or returns NULL if it can't find anything */ static struct reiserfs_journal_list *find_newer_jl_for_cn(struct reiserfs_journal_cnode *cn) { struct super_block *sb = cn->sb; b_blocknr_t blocknr = cn->blocknr; cn = cn->hprev; while (cn) { if (cn->sb == sb && cn->blocknr == blocknr && cn->jlist) { return cn->jlist; } cn = cn->hprev; } return NULL; } static void remove_journal_hash(struct super_block *, struct reiserfs_journal_cnode **, struct reiserfs_journal_list *, unsigned long, int); /* * once all the real blocks have been flushed, it is safe to remove them * from the journal list for this transaction. Aside from freeing the * cnode, this also allows the block to be reallocated for data blocks * if it had been deleted. */ static void remove_all_from_journal_list(struct super_block *sb, struct reiserfs_journal_list *jl, int debug) { struct reiserfs_journal *journal = SB_JOURNAL(sb); struct reiserfs_journal_cnode *cn, *last; cn = jl->j_realblock; /* * which is better, to lock once around the whole loop, or * to lock for each call to remove_journal_hash? 
*/ while (cn) { if (cn->blocknr != 0) { if (debug) { reiserfs_warning(sb, "reiserfs-2201", "block %u, bh is %d, state %ld", cn->blocknr, cn->bh ? 1 : 0, cn->state); } cn->state = 0; remove_journal_hash(sb, journal->j_list_hash_table, jl, cn->blocknr, 1); } last = cn; cn = cn->next; free_cnode(sb, last); } jl->j_realblock = NULL; } /* * if this timestamp is greater than the timestamp we wrote last to the * header block, write it to the header block. once this is done, I can * safely say the log area for this transaction won't ever be replayed, * and I can start releasing blocks in this transaction for reuse as data * blocks. called by flush_journal_list, before it calls * remove_all_from_journal_list */ static int _update_journal_header_block(struct super_block *sb, unsigned long offset, unsigned int trans_id) { struct reiserfs_journal_header *jh; struct reiserfs_journal *journal = SB_JOURNAL(sb); int depth; if (reiserfs_is_journal_aborted(journal)) return -EIO; if (trans_id >= journal->j_last_flush_trans_id) { if (buffer_locked((journal->j_header_bh))) { depth = reiserfs_write_unlock_nested(sb); __wait_on_buffer(journal->j_header_bh); reiserfs_write_lock_nested(sb, depth); if (unlikely(!buffer_uptodate(journal->j_header_bh))) { #ifdef CONFIG_REISERFS_CHECK reiserfs_warning(sb, "journal-699", "buffer write failed"); #endif return -EIO; } } journal->j_last_flush_trans_id = trans_id; journal->j_first_unflushed_offset = offset; jh = (struct reiserfs_journal_header *)(journal->j_header_bh-> b_data); jh->j_last_flush_trans_id = cpu_to_le32(trans_id); jh->j_first_unflushed_offset = cpu_to_le32(offset); jh->j_mount_id = cpu_to_le32(journal->j_mount_id); set_buffer_dirty(journal->j_header_bh); depth = reiserfs_write_unlock_nested(sb); if (reiserfs_barrier_flush(sb)) __sync_dirty_buffer(journal->j_header_bh, REQ_SYNC | REQ_PREFLUSH | REQ_FUA); else sync_dirty_buffer(journal->j_header_bh); reiserfs_write_lock_nested(sb, depth); if (!buffer_uptodate(journal->j_header_bh)) { reiserfs_warning(sb, "journal-837", "IO error during journal replay"); return -EIO; } } return 0; } static int update_journal_header_block(struct super_block *sb, unsigned long offset, unsigned int trans_id) { return _update_journal_header_block(sb, offset, trans_id); } /* ** flush any and all journal lists older than you are ** can only be called from flush_journal_list */ static int flush_older_journal_lists(struct super_block *sb, struct reiserfs_journal_list *jl) { struct list_head *entry; struct reiserfs_journal_list *other_jl; struct reiserfs_journal *journal = SB_JOURNAL(sb); unsigned int trans_id = jl->j_trans_id; /* * we know we are the only ones flushing things, no extra race * protection is required. */ restart: entry = journal->j_journal_list.next; /* Did we wrap? */ if (entry == &journal->j_journal_list) return 0; other_jl = JOURNAL_LIST_ENTRY(entry); if (other_jl->j_trans_id < trans_id) { BUG_ON(other_jl->j_refcount <= 0); /* do not flush all */ flush_journal_list(sb, other_jl, 0); /* other_jl is now deleted from the list */ goto restart; } return 0; } static void del_from_work_list(struct super_block *s, struct reiserfs_journal_list *jl) { struct reiserfs_journal *journal = SB_JOURNAL(s); if (!list_empty(&jl->j_working_list)) { list_del_init(&jl->j_working_list); journal->j_num_work_lists--; } } /* * flush a journal list, both commit and real blocks * * always set flushall to 1, unless you are calling from inside * flush_journal_list * * IMPORTANT. 
This can only be called while there are no journal writers, * and the journal is locked. That means it can only be called from * do_journal_end, or by journal_release */ static int flush_journal_list(struct super_block *s, struct reiserfs_journal_list *jl, int flushall) { struct reiserfs_journal_list *pjl; struct reiserfs_journal_cnode *cn; int count; int was_jwait = 0; int was_dirty = 0; struct buffer_head *saved_bh; unsigned long j_len_saved = jl->j_len; struct reiserfs_journal *journal = SB_JOURNAL(s); int err = 0; int depth; BUG_ON(j_len_saved <= 0); if (atomic_read(&journal->j_wcount) != 0) { reiserfs_warning(s, "clm-2048", "called with wcount %d", atomic_read(&journal->j_wcount)); } /* if flushall == 0, the lock is already held */ if (flushall) { reiserfs_mutex_lock_safe(&journal->j_flush_mutex, s); } else if (mutex_trylock(&journal->j_flush_mutex)) { BUG(); } count = 0; if (j_len_saved > journal->j_trans_max) { reiserfs_panic(s, "journal-715", "length is %lu, trans id %lu", j_len_saved, jl->j_trans_id); return 0; } /* if all the work is already done, get out of here */ if (atomic_read(&jl->j_nonzerolen) <= 0 && atomic_read(&jl->j_commit_left) <= 0) { goto flush_older_and_return; } /* * start by putting the commit list on disk. This will also flush * the commit lists of any older transactions */ flush_commit_list(s, jl, 1); if (!(jl->j_state & LIST_DIRTY) && !reiserfs_is_journal_aborted(journal)) BUG(); /* are we done now? */ if (atomic_read(&jl->j_nonzerolen) <= 0 && atomic_read(&jl->j_commit_left) <= 0) { goto flush_older_and_return; } /* * loop through each cnode, see if we need to write it, * or wait on a more recent transaction, or just ignore it */ if (atomic_read(&journal->j_wcount) != 0) { reiserfs_panic(s, "journal-844", "journal list is flushing, " "wcount is not 0"); } cn = jl->j_realblock; while (cn) { was_jwait = 0; was_dirty = 0; saved_bh = NULL; /* blocknr of 0 is no longer in the hash, ignore it */ if (cn->blocknr == 0) { goto free_cnode; } /* * This transaction failed commit. * Don't write out to the disk */ if (!(jl->j_state & LIST_DIRTY)) goto free_cnode; pjl = find_newer_jl_for_cn(cn); /* * the order is important here. We check pjl to make sure we * don't clear BH_JDirty_wait if we aren't the one writing this * block to disk */ if (!pjl && cn->bh) { saved_bh = cn->bh; /* * we do this to make sure nobody releases the * buffer while we are working with it */ get_bh(saved_bh); if (buffer_journal_dirty(saved_bh)) { BUG_ON(!can_dirty(cn)); was_jwait = 1; was_dirty = 1; } else if (can_dirty(cn)) { /* * everything with !pjl && jwait * should be writable */ BUG(); } } /* * if someone has this block in a newer transaction, just make * sure they are committed, and don't try writing it to disk */ if (pjl) { if (atomic_read(&pjl->j_commit_left)) flush_commit_list(s, pjl, 1); goto free_cnode; } /* * bh == NULL when the block got to disk on its own, OR, * the block got freed in a future transaction */ if (saved_bh == NULL) { goto free_cnode; } /* * this should never happen. kupdate_one_transaction has * this list locked while it works, so we should never see a * buffer here that is not marked JDirty_wait */ if ((!was_jwait) && !buffer_locked(saved_bh)) { reiserfs_warning(s, "journal-813", "BAD! buffer %llu %cdirty %cjwait, " "not in a newer transaction", (unsigned long long)saved_bh-> b_blocknr, was_dirty ? ' ' : '!', was_jwait ? 
' ' : '!'); } if (was_dirty) { /* * we inc again because saved_bh gets decremented * at free_cnode */ get_bh(saved_bh); set_bit(BLOCK_NEEDS_FLUSH, &cn->state); lock_buffer(saved_bh); BUG_ON(cn->blocknr != saved_bh->b_blocknr); if (buffer_dirty(saved_bh)) submit_logged_buffer(saved_bh); else unlock_buffer(saved_bh); count++; } else { reiserfs_warning(s, "clm-2082", "Unable to flush buffer %llu in %s", (unsigned long long)saved_bh-> b_blocknr, __func__); } free_cnode: cn = cn->next; if (saved_bh) { /* * we incremented this to keep others from * taking the buffer head away */ put_bh(saved_bh); if (atomic_read(&saved_bh->b_count) < 0) { reiserfs_warning(s, "journal-945", "saved_bh->b_count < 0"); } } } if (count > 0) { cn = jl->j_realblock; while (cn) { if (test_bit(BLOCK_NEEDS_FLUSH, &cn->state)) { if (!cn->bh) { reiserfs_panic(s, "journal-1011", "cn->bh is NULL"); } depth = reiserfs_write_unlock_nested(s); __wait_on_buffer(cn->bh); reiserfs_write_lock_nested(s, depth); if (!cn->bh) { reiserfs_panic(s, "journal-1012", "cn->bh is NULL"); } if (unlikely(!buffer_uptodate(cn->bh))) { #ifdef CONFIG_REISERFS_CHECK reiserfs_warning(s, "journal-949", "buffer write failed"); #endif err = -EIO; } /* * note, we must clear the JDirty_wait bit * after the up to date check, otherwise we * race against our flushpage routine */ BUG_ON(!test_clear_buffer_journal_dirty (cn->bh)); /* drop one ref for us */ put_bh(cn->bh); /* drop one ref for journal_mark_dirty */ release_buffer_page(cn->bh); } cn = cn->next; } } if (err) reiserfs_abort(s, -EIO, "Write error while pushing transaction to disk in %s", __func__); flush_older_and_return: /* * before we can update the journal header block, we _must_ flush all * real blocks from all older transactions to disk. This is because * once the header block is updated, this transaction will not be * replayed after a crash */ if (flushall) { flush_older_journal_lists(s, jl); } err = journal->j_errno; /* * before we can remove everything from the hash tables for this * transaction, we must make sure it can never be replayed * * since we are only called from do_journal_end, we know for sure there * are no allocations going on while we are flushing journal lists. 
So, * we only need to update the journal header block for the last list * being flushed */ if (!err && flushall) { err = update_journal_header_block(s, (jl->j_start + jl->j_len + 2) % SB_ONDISK_JOURNAL_SIZE(s), jl->j_trans_id); if (err) reiserfs_abort(s, -EIO, "Write error while updating journal header in %s", __func__); } remove_all_from_journal_list(s, jl, 0); list_del_init(&jl->j_list); journal->j_num_lists--; del_from_work_list(s, jl); if (journal->j_last_flush_id != 0 && (jl->j_trans_id - journal->j_last_flush_id) != 1) { reiserfs_warning(s, "clm-2201", "last flush %lu, current %lu", journal->j_last_flush_id, jl->j_trans_id); } journal->j_last_flush_id = jl->j_trans_id; /* * not strictly required since we are freeing the list, but it should * help find code using dead lists later on */ jl->j_len = 0; atomic_set(&jl->j_nonzerolen, 0); jl->j_start = 0; jl->j_realblock = NULL; jl->j_commit_bh = NULL; jl->j_trans_id = 0; jl->j_state = 0; put_journal_list(s, jl); if (flushall) mutex_unlock(&journal->j_flush_mutex); return err; } static int write_one_transaction(struct super_block *s, struct reiserfs_journal_list *jl, struct buffer_chunk *chunk) { struct reiserfs_journal_cnode *cn; int ret = 0; jl->j_state |= LIST_TOUCHED; del_from_work_list(s, jl); if (jl->j_len == 0 || atomic_read(&jl->j_nonzerolen) == 0) { return 0; } cn = jl->j_realblock; while (cn) { /* * if the blocknr == 0, this has been cleared from the hash, * skip it */ if (cn->blocknr == 0) { goto next; } if (cn->bh && can_dirty(cn) && buffer_dirty(cn->bh)) { struct buffer_head *tmp_bh; /* * we can race against journal_mark_freed when we try * to lock_buffer(cn->bh), so we have to inc the buffer * count, and recheck things after locking */ tmp_bh = cn->bh; get_bh(tmp_bh); lock_buffer(tmp_bh); if (cn->bh && can_dirty(cn) && buffer_dirty(tmp_bh)) { if (!buffer_journal_dirty(tmp_bh) || buffer_journal_prepared(tmp_bh)) BUG(); add_to_chunk(chunk, tmp_bh, NULL, write_chunk); ret++; } else { /* note, cn->bh might be null now */ unlock_buffer(tmp_bh); } put_bh(tmp_bh); } next: cn = cn->next; cond_resched(); } return ret; } /* used by flush_commit_list */ static void dirty_one_transaction(struct super_block *s, struct reiserfs_journal_list *jl) { struct reiserfs_journal_cnode *cn; struct reiserfs_journal_list *pjl; jl->j_state |= LIST_DIRTY; cn = jl->j_realblock; while (cn) { /* * look for a more recent transaction that logged this * buffer. Only the most recent transaction with a buffer in * it is allowed to send that buffer to disk */ pjl = find_newer_jl_for_cn(cn); if (!pjl && cn->blocknr && cn->bh && buffer_journal_dirty(cn->bh)) { BUG_ON(!can_dirty(cn)); /* * if the buffer is prepared, it will either be logged * or restored. 
If restored, we need to make sure * it actually gets marked dirty */ clear_buffer_journal_new(cn->bh); if (buffer_journal_prepared(cn->bh)) { set_buffer_journal_restore_dirty(cn->bh); } else { set_buffer_journal_test(cn->bh); mark_buffer_dirty(cn->bh); } } cn = cn->next; } } static int kupdate_transactions(struct super_block *s, struct reiserfs_journal_list *jl, struct reiserfs_journal_list **next_jl, unsigned int *next_trans_id, int num_blocks, int num_trans) { int ret = 0; int written = 0; int transactions_flushed = 0; unsigned int orig_trans_id = jl->j_trans_id; struct buffer_chunk chunk; struct list_head *entry; struct reiserfs_journal *journal = SB_JOURNAL(s); chunk.nr = 0; reiserfs_mutex_lock_safe(&journal->j_flush_mutex, s); if (!journal_list_still_alive(s, orig_trans_id)) { goto done; } /* * we've got j_flush_mutex held, nobody is going to delete any * of these lists out from underneath us */ while ((num_trans && transactions_flushed < num_trans) || (!num_trans && written < num_blocks)) { if (jl->j_len == 0 || (jl->j_state & LIST_TOUCHED) || atomic_read(&jl->j_commit_left) || !(jl->j_state & LIST_DIRTY)) { del_from_work_list(s, jl); break; } ret = write_one_transaction(s, jl, &chunk); if (ret < 0) goto done; transactions_flushed++; written += ret; entry = jl->j_list.next; /* did we wrap? */ if (entry == &journal->j_journal_list) { break; } jl = JOURNAL_LIST_ENTRY(entry); /* don't bother with older transactions */ if (jl->j_trans_id <= orig_trans_id) break; } if (chunk.nr) { write_chunk(&chunk); } done: mutex_unlock(&journal->j_flush_mutex); return ret; } /* * o_sync and fsync heavy applications tend to use * all the journal list slots with tiny transactions. These * trigger lots and lots of calls to update the header block, which * adds seeks and slows things down. * * This function tries to clear out a large chunk of the journal lists * at once, which makes everything faster since only the newest journal * list updates the header block */ static int flush_used_journal_lists(struct super_block *s, struct reiserfs_journal_list *jl) { unsigned long len = 0; unsigned long cur_len; int i; int limit = 256; struct reiserfs_journal_list *tjl; struct reiserfs_journal_list *flush_jl; unsigned int trans_id; struct reiserfs_journal *journal = SB_JOURNAL(s); flush_jl = tjl = jl; /* in data logging mode, try harder to flush a lot of blocks */ if (reiserfs_data_log(s)) limit = 1024; /* flush for 256 transactions or limit blocks, whichever comes first */ for (i = 0; i < 256 && len < limit; i++) { if (atomic_read(&tjl->j_commit_left) || tjl->j_trans_id < jl->j_trans_id) { break; } cur_len = atomic_read(&tjl->j_nonzerolen); if (cur_len > 0) { tjl->j_state &= ~LIST_TOUCHED; } len += cur_len; flush_jl = tjl; if (tjl->j_list.next == &journal->j_journal_list) break; tjl = JOURNAL_LIST_ENTRY(tjl->j_list.next); } get_journal_list(jl); get_journal_list(flush_jl); /* * try to find a group of blocks we can flush across all the * transactions, but only bother if we've actually spanned * across multiple lists */ if (flush_jl != jl) kupdate_transactions(s, jl, &tjl, &trans_id, len, i); flush_journal_list(s, flush_jl, 1); put_journal_list(s, flush_jl); put_journal_list(s, jl); return 0; } /* * removes any nodes in table with the given block number and super block. * only touches the hnext and hprev pointers. 
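* The cnode itself is not freed here; callers such as
* remove_all_from_journal_list() recycle it through free_cnode().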
*/ static void remove_journal_hash(struct super_block *sb, struct reiserfs_journal_cnode **table, struct reiserfs_journal_list *jl, unsigned long block, int remove_freed) { struct reiserfs_journal_cnode *cur; struct reiserfs_journal_cnode **head; head = &(journal_hash(table, sb, block)); if (!head) { return; } cur = *head; while (cur) { if (cur->blocknr == block && cur->sb == sb && (jl == NULL || jl == cur->jlist) && (!test_bit(BLOCK_FREED, &cur->state) || remove_freed)) { if (cur->hnext) { cur->hnext->hprev = cur->hprev; } if (cur->hprev) { cur->hprev->hnext = cur->hnext; } else { *head = cur->hnext; } cur->blocknr = 0; cur->sb = NULL; cur->state = 0; /* * anybody who clears the cur->bh will also * dec the nonzerolen */ if (cur->bh && cur->jlist) atomic_dec(&cur->jlist->j_nonzerolen); cur->bh = NULL; cur->jlist = NULL; } cur = cur->hnext; } } static void free_journal_ram(struct super_block *sb) { struct reiserfs_journal *journal = SB_JOURNAL(sb); kfree(journal->j_current_jl); journal->j_num_lists--; vfree(journal->j_cnode_free_orig); free_list_bitmaps(sb, journal->j_list_bitmap); free_bitmap_nodes(sb); /* must be after free_list_bitmaps */ if (journal->j_header_bh) { brelse(journal->j_header_bh); } /* * j_header_bh is on the journal dev, make sure * not to release the journal dev until we brelse j_header_bh */ release_journal_dev(sb, journal); vfree(journal); } /* * call on unmount. Only set error to 1 if you haven't made your way out * of read_super() yet. Any other caller must keep error at 0. */ static int do_journal_release(struct reiserfs_transaction_handle *th, struct super_block *sb, int error) { struct reiserfs_transaction_handle myth; struct reiserfs_journal *journal = SB_JOURNAL(sb); /* * we only want to flush out transactions if we were * called with error == 0 */ if (!error && !sb_rdonly(sb)) { /* end the current trans */ BUG_ON(!th->t_trans_id); do_journal_end(th, FLUSH_ALL); /* * make sure something gets logged to force * our way into the flush code */ if (!journal_join(&myth, sb)) { reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb), 1); journal_mark_dirty(&myth, SB_BUFFER_WITH_SB(sb)); do_journal_end(&myth, FLUSH_ALL); } } /* this also catches errors during the do_journal_end above */ if (!error && reiserfs_is_journal_aborted(journal)) { memset(&myth, 0, sizeof(myth)); if (!journal_join_abort(&myth, sb)) { reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb), 1); journal_mark_dirty(&myth, SB_BUFFER_WITH_SB(sb)); do_journal_end(&myth, FLUSH_ALL); } } /* * We must release the write lock here because * the workqueue job (flush_async_commit) needs this lock */ reiserfs_write_unlock(sb); /* * Cancel flushing of old commits. Note that neither of these works * will be requeued because superblock is being shutdown and doesn't * have SB_ACTIVE set. */ reiserfs_cancel_old_flush(sb); /* wait for all commits to finish */ cancel_delayed_work_sync(&SB_JOURNAL(sb)->j_work); free_journal_ram(sb); reiserfs_write_lock(sb); return 0; } /* * call on unmount. flush all journal trans, release all alloc'd ram */ int journal_release(struct reiserfs_transaction_handle *th, struct super_block *sb) { return do_journal_release(th, sb, 0); } /* only call from an error condition inside reiserfs_read_super! */ int journal_release_error(struct reiserfs_transaction_handle *th, struct super_block *sb) { return do_journal_release(th, sb, 1); } /* * compares description block with commit block. 
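* A mismatch means the commit block never fully reached disk, i.e. the
* transaction was only partially written and must not be replayed.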
* returns 1 if they differ, 0 if they are the same */ static int journal_compare_desc_commit(struct super_block *sb, struct reiserfs_journal_desc *desc, struct reiserfs_journal_commit *commit) { if (get_commit_trans_id(commit) != get_desc_trans_id(desc) || get_commit_trans_len(commit) != get_desc_trans_len(desc) || get_commit_trans_len(commit) > SB_JOURNAL(sb)->j_trans_max || get_commit_trans_len(commit) <= 0) { return 1; } return 0; } /* * returns 0 if it did not find a description block * returns -1 if it found a corrupt commit block * returns 1 if both desc and commit were valid * NOTE: only called during fs mount */ static int journal_transaction_is_valid(struct super_block *sb, struct buffer_head *d_bh, unsigned int *oldest_invalid_trans_id, unsigned long *newest_mount_id) { struct reiserfs_journal_desc *desc; struct reiserfs_journal_commit *commit; struct buffer_head *c_bh; unsigned long offset; if (!d_bh) return 0; desc = (struct reiserfs_journal_desc *)d_bh->b_data; if (get_desc_trans_len(desc) > 0 && !memcmp(get_journal_desc_magic(d_bh), JOURNAL_DESC_MAGIC, 8)) { if (oldest_invalid_trans_id && *oldest_invalid_trans_id && get_desc_trans_id(desc) > *oldest_invalid_trans_id) { reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-986: transaction " "is valid returning because trans_id %d is greater than " "oldest_invalid %lu", get_desc_trans_id(desc), *oldest_invalid_trans_id); return 0; } if (newest_mount_id && *newest_mount_id > get_desc_mount_id(desc)) { reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1087: transaction " "is valid returning because mount_id %d is less than " "newest_mount_id %lu", get_desc_mount_id(desc), *newest_mount_id); return -1; } if (get_desc_trans_len(desc) > SB_JOURNAL(sb)->j_trans_max) { reiserfs_warning(sb, "journal-2018", "Bad transaction length %d " "encountered, ignoring transaction", get_desc_trans_len(desc)); return -1; } offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(sb); /* * ok, we have a journal description block, * let's see if the transaction was valid */ c_bh = journal_bread(sb, SB_ONDISK_JOURNAL_1st_BLOCK(sb) + ((offset + get_desc_trans_len(desc) + 1) % SB_ONDISK_JOURNAL_SIZE(sb))); if (!c_bh) return 0; commit = (struct reiserfs_journal_commit *)c_bh->b_data; if (journal_compare_desc_commit(sb, desc, commit)) { reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal_transaction_is_valid, commit offset %ld had bad " "time %d or length %d", c_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(sb), get_commit_trans_id(commit), get_commit_trans_len(commit)); brelse(c_bh); if (oldest_invalid_trans_id) { *oldest_invalid_trans_id = get_desc_trans_id(desc); reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1004: " "transaction_is_valid setting oldest invalid trans_id " "to %d", get_desc_trans_id(desc)); } return -1; } brelse(c_bh); reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1006: found valid " "transaction start offset %llu, len %d id %d", d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(sb), get_desc_trans_len(desc), get_desc_trans_id(desc)); return 1; } else { return 0; } } static void brelse_array(struct buffer_head **heads, int num) { int i; for (i = 0; i < num; i++) { brelse(heads[i]); } } /* * given the start, and values for the oldest acceptable transactions, * this either reads in and replays a transaction, or returns because the * transaction is invalid, or too old. 
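* A return of 1 tells journal_read() that replay is finished; a negative
* return is a hard error that aborts the mount.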
* NOTE: only called during fs mount */ static int journal_read_transaction(struct super_block *sb, unsigned long cur_dblock, unsigned long oldest_start, unsigned int oldest_trans_id, unsigned long newest_mount_id) { struct reiserfs_journal *journal = SB_JOURNAL(sb); struct reiserfs_journal_desc *desc; struct reiserfs_journal_commit *commit; unsigned int trans_id = 0; struct buffer_head *c_bh; struct buffer_head *d_bh; struct buffer_head **log_blocks = NULL; struct buffer_head **real_blocks = NULL; unsigned int trans_offset; int i; int trans_half; d_bh = journal_bread(sb, cur_dblock); if (!d_bh) return 1; desc = (struct reiserfs_journal_desc *)d_bh->b_data; trans_offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(sb); reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1037: " "journal_read_transaction, offset %llu, len %d mount_id %d", d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(sb), get_desc_trans_len(desc), get_desc_mount_id(desc)); if (get_desc_trans_id(desc) < oldest_trans_id) { reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1039: " "journal_read_trans skipping because %lu is too old", cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(sb)); brelse(d_bh); return 1; } if (get_desc_mount_id(desc) != newest_mount_id) { reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1146: " "journal_read_trans skipping because %d is != " "newest_mount_id %lu", get_desc_mount_id(desc), newest_mount_id); brelse(d_bh); return 1; } c_bh = journal_bread(sb, SB_ONDISK_JOURNAL_1st_BLOCK(sb) + ((trans_offset + get_desc_trans_len(desc) + 1) % SB_ONDISK_JOURNAL_SIZE(sb))); if (!c_bh) { brelse(d_bh); return 1; } commit = (struct reiserfs_journal_commit *)c_bh->b_data; if (journal_compare_desc_commit(sb, desc, commit)) { reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal_read_transaction, " "commit offset %llu had bad time %d or length %d", c_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(sb), get_commit_trans_id(commit), get_commit_trans_len(commit)); brelse(c_bh); brelse(d_bh); return 1; } if (bdev_read_only(sb->s_bdev)) { reiserfs_warning(sb, "clm-2076", "device is readonly, unable to replay log"); brelse(c_bh); brelse(d_bh); return -EROFS; } trans_id = get_desc_trans_id(desc); /* * now we know we've got a good transaction, and it was * inside the valid time ranges */ log_blocks = kmalloc_array(get_desc_trans_len(desc), sizeof(struct buffer_head *), GFP_NOFS); real_blocks = kmalloc_array(get_desc_trans_len(desc), sizeof(struct buffer_head *), GFP_NOFS); if (!log_blocks || !real_blocks) { brelse(c_bh); brelse(d_bh); kfree(log_blocks); kfree(real_blocks); reiserfs_warning(sb, "journal-1169", "kmalloc failed, unable to mount FS"); return -1; } /* get all the buffer heads */ trans_half = journal_trans_half(sb->s_blocksize); for (i = 0; i < get_desc_trans_len(desc); i++) { log_blocks[i] = journal_getblk(sb, SB_ONDISK_JOURNAL_1st_BLOCK(sb) + (trans_offset + 1 + i) % SB_ONDISK_JOURNAL_SIZE(sb)); if (i < trans_half) { real_blocks[i] = sb_getblk(sb, le32_to_cpu(desc->j_realblock[i])); } else { real_blocks[i] = sb_getblk(sb, le32_to_cpu(commit-> j_realblock[i - trans_half])); } if (real_blocks[i]->b_blocknr > SB_BLOCK_COUNT(sb)) { reiserfs_warning(sb, "journal-1207", "REPLAY FAILURE fsck required! " "Block to replay is outside of " "filesystem"); goto abort_replay; } /* make sure we don't try to replay onto log or reserved area */ if (is_block_in_log_or_reserved_area (sb, real_blocks[i]->b_blocknr)) { reiserfs_warning(sb, "journal-1204", "REPLAY FAILURE fsck required! 
" "Trying to replay onto a log block"); abort_replay: brelse_array(log_blocks, i); brelse_array(real_blocks, i); brelse(c_bh); brelse(d_bh); kfree(log_blocks); kfree(real_blocks); return -1; } } /* read in the log blocks, memcpy to the corresponding real block */ ll_rw_block(REQ_OP_READ, 0, get_desc_trans_len(desc), log_blocks); for (i = 0; i < get_desc_trans_len(desc); i++) { wait_on_buffer(log_blocks[i]); if (!buffer_uptodate(log_blocks[i])) { reiserfs_warning(sb, "journal-1212", "REPLAY FAILURE fsck required! " "buffer write failed"); brelse_array(log_blocks + i, get_desc_trans_len(desc) - i); brelse_array(real_blocks, get_desc_trans_len(desc)); brelse(c_bh); brelse(d_bh); kfree(log_blocks); kfree(real_blocks); return -1; } memcpy(real_blocks[i]->b_data, log_blocks[i]->b_data, real_blocks[i]->b_size); set_buffer_uptodate(real_blocks[i]); brelse(log_blocks[i]); } /* flush out the real blocks */ for (i = 0; i < get_desc_trans_len(desc); i++) { set_buffer_dirty(real_blocks[i]); write_dirty_buffer(real_blocks[i], 0); } for (i = 0; i < get_desc_trans_len(desc); i++) { wait_on_buffer(real_blocks[i]); if (!buffer_uptodate(real_blocks[i])) { reiserfs_warning(sb, "journal-1226", "REPLAY FAILURE, fsck required! " "buffer write failed"); brelse_array(real_blocks + i, get_desc_trans_len(desc) - i); brelse(c_bh); brelse(d_bh); kfree(log_blocks); kfree(real_blocks); return -1; } brelse(real_blocks[i]); } cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(sb) + ((trans_offset + get_desc_trans_len(desc) + 2) % SB_ONDISK_JOURNAL_SIZE(sb)); reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1095: setting journal " "start to offset %ld", cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(sb)); /* * init starting values for the first transaction, in case * this is the last transaction to be replayed. */ journal->j_start = cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(sb); journal->j_last_flush_trans_id = trans_id; journal->j_trans_id = trans_id + 1; /* check for trans_id overflow */ if (journal->j_trans_id == 0) journal->j_trans_id = 10; brelse(c_bh); brelse(d_bh); kfree(log_blocks); kfree(real_blocks); return 0; } /* * This function reads blocks starting from block and to max_block of bufsize * size (but no more than BUFNR blocks at a time). This proved to improve * mounting speed on self-rebuilding raid5 arrays at least. * Right now it is only used from journal code. But later we might use it * from other places. * Note: Do not use journal_getblk/sb_getblk functions here! */ static struct buffer_head *reiserfs_breada(struct block_device *dev, b_blocknr_t block, int bufsize, b_blocknr_t max_block) { struct buffer_head *bhlist[BUFNR]; unsigned int blocks = BUFNR; struct buffer_head *bh; int i, j; bh = __getblk(dev, block, bufsize); if (buffer_uptodate(bh)) return (bh); if (block + BUFNR > max_block) { blocks = max_block - block; } bhlist[0] = bh; j = 1; for (i = 1; i < blocks; i++) { bh = __getblk(dev, block + i, bufsize); if (buffer_uptodate(bh)) { brelse(bh); break; } else bhlist[j++] = bh; } ll_rw_block(REQ_OP_READ, 0, j, bhlist); for (i = 1; i < j; i++) brelse(bhlist[i]); bh = bhlist[0]; wait_on_buffer(bh); if (buffer_uptodate(bh)) return bh; brelse(bh); return NULL; } /* * read and replay the log * on a clean unmount, the journal header's next unflushed pointer will be * to an invalid transaction. This tests that before finding all the * transactions in the log, which makes normal mount times fast. * * After a crash, this starts with the next unflushed transaction, and * replays until it finds one too old, or invalid. 
* * On exit, it sets things up so the first transaction will work correctly. * NOTE: only called during fs mount */ static int journal_read(struct super_block *sb) { struct reiserfs_journal *journal = SB_JOURNAL(sb); struct reiserfs_journal_desc *desc; unsigned int oldest_trans_id = 0; unsigned int oldest_invalid_trans_id = 0; time64_t start; unsigned long oldest_start = 0; unsigned long cur_dblock = 0; unsigned long newest_mount_id = 9; struct buffer_head *d_bh; struct reiserfs_journal_header *jh; int valid_journal_header = 0; int replay_count = 0; int continue_replay = 1; int ret; cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(sb); reiserfs_info(sb, "checking transaction log (%pg)\n", journal->j_dev_bd); start = ktime_get_seconds(); /* * step 1, read in the journal header block. Check the transaction * it says is the first unflushed, and if that transaction is not * valid, replay is done */ journal->j_header_bh = journal_bread(sb, SB_ONDISK_JOURNAL_1st_BLOCK(sb) + SB_ONDISK_JOURNAL_SIZE(sb)); if (!journal->j_header_bh) { return 1; } jh = (struct reiserfs_journal_header *)(journal->j_header_bh->b_data); if (le32_to_cpu(jh->j_first_unflushed_offset) < SB_ONDISK_JOURNAL_SIZE(sb) && le32_to_cpu(jh->j_last_flush_trans_id) > 0) { oldest_start = SB_ONDISK_JOURNAL_1st_BLOCK(sb) + le32_to_cpu(jh->j_first_unflushed_offset); oldest_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) + 1; newest_mount_id = le32_to_cpu(jh->j_mount_id); reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1153: found in " "header: first_unflushed_offset %d, last_flushed_trans_id " "%lu", le32_to_cpu(jh->j_first_unflushed_offset), le32_to_cpu(jh->j_last_flush_trans_id)); valid_journal_header = 1; /* * now, we try to read the first unflushed offset. If it * is not valid, there is nothing more we can do, and it * makes no sense to read through the whole log. */ d_bh = journal_bread(sb, SB_ONDISK_JOURNAL_1st_BLOCK(sb) + le32_to_cpu(jh->j_first_unflushed_offset)); ret = journal_transaction_is_valid(sb, d_bh, NULL, NULL); if (!ret) { continue_replay = 0; } brelse(d_bh); goto start_log_replay; } /* * ok, there are transactions that need to be replayed. start * with the first log block, find all the valid transactions, and * pick out the oldest. 
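* A valid description block lets the scan jump ahead by the whole
* transaction length; anything else advances one block at a time.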
*/ while (continue_replay && cur_dblock < (SB_ONDISK_JOURNAL_1st_BLOCK(sb) + SB_ONDISK_JOURNAL_SIZE(sb))) { /* * Note that it is required for blocksize of primary fs * device and journal device to be the same */ d_bh = reiserfs_breada(journal->j_dev_bd, cur_dblock, sb->s_blocksize, SB_ONDISK_JOURNAL_1st_BLOCK(sb) + SB_ONDISK_JOURNAL_SIZE(sb)); ret = journal_transaction_is_valid(sb, d_bh, &oldest_invalid_trans_id, &newest_mount_id); if (ret == 1) { desc = (struct reiserfs_journal_desc *)d_bh->b_data; if (oldest_start == 0) { /* init all oldest_ values */ oldest_trans_id = get_desc_trans_id(desc); oldest_start = d_bh->b_blocknr; newest_mount_id = get_desc_mount_id(desc); reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1179: Setting " "oldest_start to offset %llu, trans_id %lu", oldest_start - SB_ONDISK_JOURNAL_1st_BLOCK (sb), oldest_trans_id); } else if (oldest_trans_id > get_desc_trans_id(desc)) { /* one we just read was older */ oldest_trans_id = get_desc_trans_id(desc); oldest_start = d_bh->b_blocknr; reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1180: Resetting " "oldest_start to offset %lu, trans_id %lu", oldest_start - SB_ONDISK_JOURNAL_1st_BLOCK (sb), oldest_trans_id); } if (newest_mount_id < get_desc_mount_id(desc)) { newest_mount_id = get_desc_mount_id(desc); reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1299: Setting " "newest_mount_id to %d", get_desc_mount_id(desc)); } cur_dblock += get_desc_trans_len(desc) + 2; } else { cur_dblock++; } brelse(d_bh); } start_log_replay: cur_dblock = oldest_start; if (oldest_trans_id) { reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1206: Starting replay " "from offset %llu, trans_id %lu", cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(sb), oldest_trans_id); } replay_count = 0; while (continue_replay && oldest_trans_id > 0) { ret = journal_read_transaction(sb, cur_dblock, oldest_start, oldest_trans_id, newest_mount_id); if (ret < 0) { return ret; } else if (ret != 0) { break; } cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(sb) + journal->j_start; replay_count++; if (cur_dblock == oldest_start) break; } if (oldest_trans_id == 0) { reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1225: No valid " "transactions found"); } /* * j_start does not get set correctly if we don't replay any * transactions. 
if we had a valid journal_header, set j_start * to the first unflushed transaction value, copy the trans_id * from the header */ if (valid_journal_header && replay_count == 0) { journal->j_start = le32_to_cpu(jh->j_first_unflushed_offset); journal->j_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) + 1; /* check for trans_id overflow */ if (journal->j_trans_id == 0) journal->j_trans_id = 10; journal->j_last_flush_trans_id = le32_to_cpu(jh->j_last_flush_trans_id); journal->j_mount_id = le32_to_cpu(jh->j_mount_id) + 1; } else { journal->j_mount_id = newest_mount_id + 1; } reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1299: Setting " "newest_mount_id to %lu", journal->j_mount_id); journal->j_first_unflushed_offset = journal->j_start; if (replay_count > 0) { reiserfs_info(sb, "replayed %d transactions in %lu seconds\n", replay_count, ktime_get_seconds() - start); } /* needed to satisfy the locking in _update_journal_header_block */ reiserfs_write_lock(sb); if (!bdev_read_only(sb->s_bdev) && _update_journal_header_block(sb, journal->j_start, journal->j_last_flush_trans_id)) { reiserfs_write_unlock(sb); /* * replay failed, caller must call free_journal_ram and abort * the mount */ return -1; } reiserfs_write_unlock(sb); return 0; } static struct reiserfs_journal_list *alloc_journal_list(struct super_block *s) { struct reiserfs_journal_list *jl; jl = kzalloc(sizeof(struct reiserfs_journal_list), GFP_NOFS | __GFP_NOFAIL); INIT_LIST_HEAD(&jl->j_list); INIT_LIST_HEAD(&jl->j_working_list); INIT_LIST_HEAD(&jl->j_tail_bh_list); INIT_LIST_HEAD(&jl->j_bh_list); mutex_init(&jl->j_commit_mutex); SB_JOURNAL(s)->j_num_lists++; get_journal_list(jl); return jl; } static void journal_list_init(struct super_block *sb) { SB_JOURNAL(sb)->j_current_jl = alloc_journal_list(sb); } static void release_journal_dev(struct super_block *super, struct reiserfs_journal *journal) { if (journal->j_dev_bd != NULL) { blkdev_put(journal->j_dev_bd, journal->j_dev_mode); journal->j_dev_bd = NULL; } } static int journal_init_dev(struct super_block *super, struct reiserfs_journal *journal, const char *jdev_name) { int result; dev_t jdev; fmode_t blkdev_mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL; result = 0; journal->j_dev_bd = NULL; jdev = SB_ONDISK_JOURNAL_DEVICE(super) ? 
new_decode_dev(SB_ONDISK_JOURNAL_DEVICE(super)) : super->s_dev; if (bdev_read_only(super->s_bdev)) blkdev_mode = FMODE_READ; /* no "jdev" option was given: use the device recorded in the super block */ if ((!jdev_name || !jdev_name[0])) { if (jdev == super->s_dev) blkdev_mode &= ~FMODE_EXCL; journal->j_dev_bd = blkdev_get_by_dev(jdev, blkdev_mode, journal); journal->j_dev_mode = blkdev_mode; if (IS_ERR(journal->j_dev_bd)) { result = PTR_ERR(journal->j_dev_bd); journal->j_dev_bd = NULL; reiserfs_warning(super, "sh-458", "cannot init journal device unknown-block(%u,%u): %i", MAJOR(jdev), MINOR(jdev), result); return result; } else if (jdev != super->s_dev) set_blocksize(journal->j_dev_bd, super->s_blocksize); return 0; } journal->j_dev_mode = blkdev_mode; journal->j_dev_bd = blkdev_get_by_path(jdev_name, blkdev_mode, journal); if (IS_ERR(journal->j_dev_bd)) { result = PTR_ERR(journal->j_dev_bd); journal->j_dev_bd = NULL; reiserfs_warning(super, "sh-457", "journal_init_dev: Cannot open '%s': %i", jdev_name, result); return result; } set_blocksize(journal->j_dev_bd, super->s_blocksize); reiserfs_info(super, "journal_init_dev: journal device: %pg\n", journal->j_dev_bd); return 0; } /* * When creating/tuning a file system user can assign some * journal params within boundaries which depend on the ratio * blocksize/standard_blocksize. * * For blocks >= standard_blocksize transaction size should * be not less than JOURNAL_TRANS_MIN_DEFAULT, and not more * than JOURNAL_TRANS_MAX_DEFAULT. * * For blocks < standard_blocksize these boundaries should be * decreased proportionally. */ #define REISERFS_STANDARD_BLKSIZE (4096) static int check_advise_trans_params(struct super_block *sb, struct reiserfs_journal *journal) { if (journal->j_trans_max) { /* Non-default journal params. Do sanity check for them. */ int ratio = 1; if (sb->s_blocksize < REISERFS_STANDARD_BLKSIZE) ratio = REISERFS_STANDARD_BLKSIZE / sb->s_blocksize; if (journal->j_trans_max > JOURNAL_TRANS_MAX_DEFAULT / ratio || journal->j_trans_max < JOURNAL_TRANS_MIN_DEFAULT / ratio || SB_ONDISK_JOURNAL_SIZE(sb) / journal->j_trans_max < JOURNAL_MIN_RATIO) { reiserfs_warning(sb, "sh-462", "bad transaction max size (%u). " "FSCK?", journal->j_trans_max); return 1; } if (journal->j_max_batch != (journal->j_trans_max) * JOURNAL_MAX_BATCH_DEFAULT/JOURNAL_TRANS_MAX_DEFAULT) { reiserfs_warning(sb, "sh-463", "bad transaction max batch (%u). " "FSCK?", journal->j_max_batch); return 1; } } else { /* * Default journal params. * The file system was created by old version * of mkreiserfs, so some fields contain zeros, * and we need to advise proper values for them */ if (sb->s_blocksize != REISERFS_STANDARD_BLKSIZE) { reiserfs_warning(sb, "sh-464", "bad blocksize (%u)", sb->s_blocksize); return 1; } journal->j_trans_max = JOURNAL_TRANS_MAX_DEFAULT; journal->j_max_batch = JOURNAL_MAX_BATCH_DEFAULT; journal->j_max_commit_age = JOURNAL_MAX_COMMIT_AGE; } return 0; } /* must be called once on fs mount. 
calls journal_read for you */ int journal_init(struct super_block *sb, const char *j_dev_name, int old_format, unsigned int commit_max_age) { int num_cnodes = SB_ONDISK_JOURNAL_SIZE(sb) * 2; struct buffer_head *bhjh; struct reiserfs_super_block *rs; struct reiserfs_journal_header *jh; struct reiserfs_journal *journal; struct reiserfs_journal_list *jl; int ret; journal = SB_JOURNAL(sb) = vzalloc(sizeof(struct reiserfs_journal)); if (!journal) { reiserfs_warning(sb, "journal-1256", "unable to get memory for journal structure"); return 1; } INIT_LIST_HEAD(&journal->j_bitmap_nodes); INIT_LIST_HEAD(&journal->j_prealloc_list); INIT_LIST_HEAD(&journal->j_working_list); INIT_LIST_HEAD(&journal->j_journal_list); journal->j_persistent_trans = 0; if (reiserfs_allocate_list_bitmaps(sb, journal->j_list_bitmap, reiserfs_bmap_count(sb))) goto free_and_return; allocate_bitmap_nodes(sb); /* reserved for journal area support */ SB_JOURNAL_1st_RESERVED_BLOCK(sb) = (old_format ? REISERFS_OLD_DISK_OFFSET_IN_BYTES / sb->s_blocksize + reiserfs_bmap_count(sb) + 1 : REISERFS_DISK_OFFSET_IN_BYTES / sb->s_blocksize + 2); /* * Sanity check to see is the standard journal fitting * within first bitmap (actual for small blocksizes) */ if (!SB_ONDISK_JOURNAL_DEVICE(sb) && (SB_JOURNAL_1st_RESERVED_BLOCK(sb) + SB_ONDISK_JOURNAL_SIZE(sb) > sb->s_blocksize * 8)) { reiserfs_warning(sb, "journal-1393", "journal does not fit for area addressed " "by first of bitmap blocks. It starts at " "%u and its size is %u. Block size %ld", SB_JOURNAL_1st_RESERVED_BLOCK(sb), SB_ONDISK_JOURNAL_SIZE(sb), sb->s_blocksize); goto free_and_return; } if (journal_init_dev(sb, journal, j_dev_name) != 0) { reiserfs_warning(sb, "sh-462", "unable to initialize journal device"); goto free_and_return; } rs = SB_DISK_SUPER_BLOCK(sb); /* read journal header */ bhjh = journal_bread(sb, SB_ONDISK_JOURNAL_1st_BLOCK(sb) + SB_ONDISK_JOURNAL_SIZE(sb)); if (!bhjh) { reiserfs_warning(sb, "sh-459", "unable to read journal header"); goto free_and_return; } jh = (struct reiserfs_journal_header *)(bhjh->b_data); /* make sure that journal matches to the super block */ if (is_reiserfs_jr(rs) && (le32_to_cpu(jh->jh_journal.jp_journal_magic) != sb_jp_journal_magic(rs))) { reiserfs_warning(sb, "sh-460", "journal header magic %x (device %pg) does " "not match to magic found in super block %x", jh->jh_journal.jp_journal_magic, journal->j_dev_bd, sb_jp_journal_magic(rs)); brelse(bhjh); goto free_and_return; } journal->j_trans_max = le32_to_cpu(jh->jh_journal.jp_journal_trans_max); journal->j_max_batch = le32_to_cpu(jh->jh_journal.jp_journal_max_batch); journal->j_max_commit_age = le32_to_cpu(jh->jh_journal.jp_journal_max_commit_age); journal->j_max_trans_age = JOURNAL_MAX_TRANS_AGE; if (check_advise_trans_params(sb, journal) != 0) goto free_and_return; journal->j_default_max_commit_age = journal->j_max_commit_age; if (commit_max_age != 0) { journal->j_max_commit_age = commit_max_age; journal->j_max_trans_age = commit_max_age; } reiserfs_info(sb, "journal params: device %pg, size %u, " "journal first block %u, max trans len %u, max batch %u, " "max commit age %u, max trans age %u\n", journal->j_dev_bd, SB_ONDISK_JOURNAL_SIZE(sb), SB_ONDISK_JOURNAL_1st_BLOCK(sb), journal->j_trans_max, journal->j_max_batch, journal->j_max_commit_age, journal->j_max_trans_age); brelse(bhjh); journal->j_list_bitmap_index = 0; journal_list_init(sb); memset(journal->j_list_hash_table, 0, JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *)); INIT_LIST_HEAD(&journal->j_dirty_buffers); 
spin_lock_init(&journal->j_dirty_buffers_lock); journal->j_start = 0; journal->j_len = 0; journal->j_len_alloc = 0; atomic_set(&journal->j_wcount, 0); atomic_set(&journal->j_async_throttle, 0); journal->j_bcount = 0; journal->j_trans_start_time = 0; journal->j_last = NULL; journal->j_first = NULL; init_waitqueue_head(&journal->j_join_wait); mutex_init(&journal->j_mutex); mutex_init(&journal->j_flush_mutex); journal->j_trans_id = 10; journal->j_mount_id = 10; journal->j_state = 0; atomic_set(&journal->j_jlock, 0); journal->j_cnode_free_list = allocate_cnodes(num_cnodes); journal->j_cnode_free_orig = journal->j_cnode_free_list; journal->j_cnode_free = journal->j_cnode_free_list ? num_cnodes : 0; journal->j_cnode_used = 0; journal->j_must_wait = 0; if (journal->j_cnode_free == 0) { reiserfs_warning(sb, "journal-2004", "Journal cnode memory " "allocation failed (%ld bytes). Journal is " "too large for available memory. Usually " "this is due to a journal that is too large.", sizeof (struct reiserfs_journal_cnode) * num_cnodes); goto free_and_return; } init_journal_hash(sb); jl = journal->j_current_jl; /* * get_list_bitmap() may call flush_commit_list() which * requires the lock. Calling flush_commit_list() shouldn't happen * this early but I like to be paranoid. */ reiserfs_write_lock(sb); jl->j_list_bitmap = get_list_bitmap(sb, jl); reiserfs_write_unlock(sb); if (!jl->j_list_bitmap) { reiserfs_warning(sb, "journal-2005", "get_list_bitmap failed for journal list 0"); goto free_and_return; } ret = journal_read(sb); if (ret < 0) { reiserfs_warning(sb, "reiserfs-2006", "Replay Failure, unable to mount"); goto free_and_return; } INIT_DELAYED_WORK(&journal->j_work, flush_async_commits); journal->j_work_sb = sb; return 0; free_and_return: free_journal_ram(sb); return 1; } /* * test for a polite end of the current transaction. 
Used by file_write, * and should be used by delete to make sure they don't write more than * can fit inside a single transaction */ int journal_transaction_should_end(struct reiserfs_transaction_handle *th, int new_alloc) { struct reiserfs_journal *journal = SB_JOURNAL(th->t_super); time64_t now = ktime_get_seconds(); /* cannot restart while nested */ BUG_ON(!th->t_trans_id); if (th->t_refcount > 1) return 0; if (journal->j_must_wait > 0 || (journal->j_len_alloc + new_alloc) >= journal->j_max_batch || atomic_read(&journal->j_jlock) || (now - journal->j_trans_start_time) > journal->j_max_trans_age || journal->j_cnode_free < (journal->j_trans_max * 3)) { return 1; } journal->j_len_alloc += new_alloc; th->t_blocks_allocated += new_alloc ; return 0; } /* this must be called inside a transaction */ void reiserfs_block_writes(struct reiserfs_transaction_handle *th) { struct reiserfs_journal *journal = SB_JOURNAL(th->t_super); BUG_ON(!th->t_trans_id); journal->j_must_wait = 1; set_bit(J_WRITERS_BLOCKED, &journal->j_state); return; } /* this must be called without a transaction started */ void reiserfs_allow_writes(struct super_block *s) { struct reiserfs_journal *journal = SB_JOURNAL(s); clear_bit(J_WRITERS_BLOCKED, &journal->j_state); wake_up(&journal->j_join_wait); } /* this must be called without a transaction started */ void reiserfs_wait_on_write_block(struct super_block *s) { struct reiserfs_journal *journal = SB_JOURNAL(s); wait_event(journal->j_join_wait, !test_bit(J_WRITERS_BLOCKED, &journal->j_state)); } static void queue_log_writer(struct super_block *s) { wait_queue_entry_t wait; struct reiserfs_journal *journal = SB_JOURNAL(s); set_bit(J_WRITERS_QUEUED, &journal->j_state); /* * we don't want to use wait_event here because * we only want to wait once. */ init_waitqueue_entry(&wait, current); add_wait_queue(&journal->j_join_wait, &wait); set_current_state(TASK_UNINTERRUPTIBLE); if (test_bit(J_WRITERS_QUEUED, &journal->j_state)) { int depth = reiserfs_write_unlock_nested(s); schedule(); reiserfs_write_lock_nested(s, depth); } __set_current_state(TASK_RUNNING); remove_wait_queue(&journal->j_join_wait, &wait); } static void wake_queued_writers(struct super_block *s) { struct reiserfs_journal *journal = SB_JOURNAL(s); if (test_and_clear_bit(J_WRITERS_QUEUED, &journal->j_state)) wake_up(&journal->j_join_wait); } static void let_transaction_grow(struct super_block *sb, unsigned int trans_id) { struct reiserfs_journal *journal = SB_JOURNAL(sb); unsigned long bcount = journal->j_bcount; while (1) { int depth; depth = reiserfs_write_unlock_nested(sb); schedule_timeout_uninterruptible(1); reiserfs_write_lock_nested(sb, depth); journal->j_current_jl->j_state |= LIST_COMMIT_PENDING; while ((atomic_read(&journal->j_wcount) > 0 || atomic_read(&journal->j_jlock)) && journal->j_trans_id == trans_id) { queue_log_writer(sb); } if (journal->j_trans_id != trans_id) break; if (bcount == journal->j_bcount) break; bcount = journal->j_bcount; } } /* * join == true if you must join an existing transaction. * join == false if you can deal with waiting for others to finish * * this will block until the transaction is joinable. send the number of * blocks you expect to use in nblocks. 
*/ static int do_journal_begin_r(struct reiserfs_transaction_handle *th, struct super_block *sb, unsigned long nblocks, int join) { time64_t now = ktime_get_seconds(); unsigned int old_trans_id; struct reiserfs_journal *journal = SB_JOURNAL(sb); struct reiserfs_transaction_handle myth; int sched_count = 0; int retval; int depth; reiserfs_check_lock_depth(sb, "journal_begin"); BUG_ON(nblocks > journal->j_trans_max); PROC_INFO_INC(sb, journal.journal_being); /* set here for journal_join */ th->t_refcount = 1; th->t_super = sb; relock: lock_journal(sb); if (join != JBEGIN_ABORT && reiserfs_is_journal_aborted(journal)) { unlock_journal(sb); retval = journal->j_errno; goto out_fail; } journal->j_bcount++; if (test_bit(J_WRITERS_BLOCKED, &journal->j_state)) { unlock_journal(sb); depth = reiserfs_write_unlock_nested(sb); reiserfs_wait_on_write_block(sb); reiserfs_write_lock_nested(sb, depth); PROC_INFO_INC(sb, journal.journal_relock_writers); goto relock; } now = ktime_get_seconds(); /* * if there is no room in the journal OR * if this transaction is too old, and we weren't called joinable, * wait for it to finish before beginning we don't sleep if there * aren't other writers */ if ((!join && journal->j_must_wait > 0) || (!join && (journal->j_len_alloc + nblocks + 2) >= journal->j_max_batch) || (!join && atomic_read(&journal->j_wcount) > 0 && journal->j_trans_start_time > 0 && (now - journal->j_trans_start_time) > journal->j_max_trans_age) || (!join && atomic_read(&journal->j_jlock)) || (!join && journal->j_cnode_free < (journal->j_trans_max * 3))) { old_trans_id = journal->j_trans_id; /* allow others to finish this transaction */ unlock_journal(sb); if (!join && (journal->j_len_alloc + nblocks + 2) >= journal->j_max_batch && ((journal->j_len + nblocks + 2) * 100) < (journal->j_len_alloc * 75)) { if (atomic_read(&journal->j_wcount) > 10) { sched_count++; queue_log_writer(sb); goto relock; } } /* * don't mess with joining the transaction if all we * have to do is wait for someone else to do a commit */ if (atomic_read(&journal->j_jlock)) { while (journal->j_trans_id == old_trans_id && atomic_read(&journal->j_jlock)) { queue_log_writer(sb); } goto relock; } retval = journal_join(&myth, sb); if (retval) goto out_fail; /* someone might have ended the transaction while we joined */ if (old_trans_id != journal->j_trans_id) { retval = do_journal_end(&myth, 0); } else { retval = do_journal_end(&myth, COMMIT_NOW); } if (retval) goto out_fail; PROC_INFO_INC(sb, journal.journal_relock_wcount); goto relock; } /* we are the first writer, set trans_id */ if (journal->j_trans_start_time == 0) { journal->j_trans_start_time = ktime_get_seconds(); } atomic_inc(&journal->j_wcount); journal->j_len_alloc += nblocks; th->t_blocks_logged = 0; th->t_blocks_allocated = nblocks; th->t_trans_id = journal->j_trans_id; unlock_journal(sb); INIT_LIST_HEAD(&th->t_list); return 0; out_fail: memset(th, 0, sizeof(*th)); /* * Re-set th->t_super, so we can properly keep track of how many * persistent transactions there are. We need to do this so if this * call is part of a failed restart_transaction, we can free it later */ th->t_super = sb; return retval; } struct reiserfs_transaction_handle *reiserfs_persistent_transaction(struct super_block *s, int nblocks) { int ret; struct reiserfs_transaction_handle *th; /* * if we're nesting into an existing transaction. 
It will be * persistent on its own */ if (reiserfs_transaction_running(s)) { th = current->journal_info; th->t_refcount++; BUG_ON(th->t_refcount < 2); return th; } th = kmalloc(sizeof(struct reiserfs_transaction_handle), GFP_NOFS); if (!th) return NULL; ret = journal_begin(th, s, nblocks); if (ret) { kfree(th); return NULL; } SB_JOURNAL(s)->j_persistent_trans++; return th; } int reiserfs_end_persistent_transaction(struct reiserfs_transaction_handle *th) { struct super_block *s = th->t_super; int ret = 0; if (th->t_trans_id) ret = journal_end(th); else ret = -EIO; if (th->t_refcount == 0) { SB_JOURNAL(s)->j_persistent_trans--; kfree(th); } return ret; } static int journal_join(struct reiserfs_transaction_handle *th, struct super_block *sb) { struct reiserfs_transaction_handle *cur_th = current->journal_info; /* * this keeps do_journal_end from NULLing out the * current->journal_info pointer */ th->t_handle_save = cur_th; BUG_ON(cur_th && cur_th->t_refcount > 1); return do_journal_begin_r(th, sb, 1, JBEGIN_JOIN); } int journal_join_abort(struct reiserfs_transaction_handle *th, struct super_block *sb) { struct reiserfs_transaction_handle *cur_th = current->journal_info; /* * this keeps do_journal_end from NULLing out the * current->journal_info pointer */ th->t_handle_save = cur_th; BUG_ON(cur_th && cur_th->t_refcount > 1); return do_journal_begin_r(th, sb, 1, JBEGIN_ABORT); } int journal_begin(struct reiserfs_transaction_handle *th, struct super_block *sb, unsigned long nblocks) { struct reiserfs_transaction_handle *cur_th = current->journal_info; int ret; th->t_handle_save = NULL; if (cur_th) { /* we are nesting into the current transaction */ if (cur_th->t_super == sb) { BUG_ON(!cur_th->t_refcount); cur_th->t_refcount++; memcpy(th, cur_th, sizeof(*th)); if (th->t_refcount <= 1) reiserfs_warning(sb, "reiserfs-2005", "BAD: refcount <= 1, but " "journal_info != 0"); return 0; } else { /* * we've ended up with a handle from a different * filesystem. save it and restore on journal_end. * This should never really happen... */ reiserfs_warning(sb, "clm-2100", "nesting info a different FS"); th->t_handle_save = current->journal_info; current->journal_info = th; } } else { current->journal_info = th; } ret = do_journal_begin_r(th, sb, nblocks, JBEGIN_REG); BUG_ON(current->journal_info != th); /* * I guess this boils down to being the reciprocal of clm-2100 above. * If do_journal_begin_r fails, we need to put it back, since * journal_end won't be called to do it. */ if (ret) current->journal_info = th->t_handle_save; else BUG_ON(!th->t_refcount); return ret; } /* * puts bh into the current transaction. If it was already there, reorders * removes the old pointers from the hash, and puts new ones in (to make * sure replay happen in the right order). * * if it was dirty, cleans and files onto the clean list. I can't let it * be dirty again until the transaction is committed. * * if j_len, is bigger than j_len_alloc, it pushes j_len_alloc to 10 + j_len. 
*/ int journal_mark_dirty(struct reiserfs_transaction_handle *th, struct buffer_head *bh) { struct super_block *sb = th->t_super; struct reiserfs_journal *journal = SB_JOURNAL(sb); struct reiserfs_journal_cnode *cn = NULL; int count_already_incd = 0; int prepared = 0; BUG_ON(!th->t_trans_id); PROC_INFO_INC(sb, journal.mark_dirty); if (th->t_trans_id != journal->j_trans_id) { reiserfs_panic(th->t_super, "journal-1577", "handle trans id %ld != current trans id %ld", th->t_trans_id, journal->j_trans_id); } prepared = test_clear_buffer_journal_prepared(bh); clear_buffer_journal_restore_dirty(bh); /* already in this transaction, we are done */ if (buffer_journaled(bh)) { PROC_INFO_INC(sb, journal.mark_dirty_already); return 0; } /* * this must be turned into a panic instead of a warning. We can't * allow a dirty or journal_dirty or locked buffer to be logged, as * some changes could get to disk too early. NOT GOOD. */ if (!prepared || buffer_dirty(bh)) { reiserfs_warning(sb, "journal-1777", "buffer %llu bad state " "%cPREPARED %cLOCKED %cDIRTY %cJDIRTY_WAIT", (unsigned long long)bh->b_blocknr, prepared ? ' ' : '!', buffer_locked(bh) ? ' ' : '!', buffer_dirty(bh) ? ' ' : '!', buffer_journal_dirty(bh) ? ' ' : '!'); } if (atomic_read(&journal->j_wcount) <= 0) { reiserfs_warning(sb, "journal-1409", "returning because j_wcount was %d", atomic_read(&journal->j_wcount)); return 1; } /* * this error means I've screwed up, and we've overflowed * the transaction. Nothing can be done here, except make the * FS readonly or panic. */ if (journal->j_len >= journal->j_trans_max) { reiserfs_panic(th->t_super, "journal-1413", "j_len (%lu) is too big", journal->j_len); } if (buffer_journal_dirty(bh)) { count_already_incd = 1; PROC_INFO_INC(sb, journal.mark_dirty_notjournal); clear_buffer_journal_dirty(bh); } if (journal->j_len > journal->j_len_alloc) { journal->j_len_alloc = journal->j_len + JOURNAL_PER_BALANCE_CNT; } set_buffer_journaled(bh); /* now put this guy on the end */ if (!cn) { cn = get_cnode(sb); if (!cn) { reiserfs_panic(sb, "journal-4", "get_cnode failed!"); } if (th->t_blocks_logged == th->t_blocks_allocated) { th->t_blocks_allocated += JOURNAL_PER_BALANCE_CNT; journal->j_len_alloc += JOURNAL_PER_BALANCE_CNT; } th->t_blocks_logged++; journal->j_len++; cn->bh = bh; cn->blocknr = bh->b_blocknr; cn->sb = sb; cn->jlist = NULL; insert_journal_hash(journal->j_hash_table, cn); if (!count_already_incd) { get_bh(bh); } } cn->next = NULL; cn->prev = journal->j_last; cn->bh = bh; if (journal->j_last) { journal->j_last->next = cn; journal->j_last = cn; } else { journal->j_first = cn; journal->j_last = cn; } reiserfs_schedule_old_flush(sb); return 0; } int journal_end(struct reiserfs_transaction_handle *th) { struct super_block *sb = th->t_super; if (!current->journal_info && th->t_refcount > 1) reiserfs_warning(sb, "REISER-NESTING", "th NULL, refcount %d", th->t_refcount); if (!th->t_trans_id) { WARN_ON(1); return -EIO; } th->t_refcount--; if (th->t_refcount > 0) { struct reiserfs_transaction_handle *cur_th = current->journal_info; /* * we aren't allowed to close a nested transaction on a * different filesystem from the one in the task struct */ BUG_ON(cur_th->t_super != th->t_super); if (th != cur_th) { memcpy(current->journal_info, th, sizeof(*th)); th->t_trans_id = 0; } return 0; } else { return do_journal_end(th, 0); } } /* * removes from the current transaction, relsing and descrementing any counters. 
* also files the removed buffer directly onto the clean list * * called by journal_mark_freed when a block has been deleted * * returns 1 if it cleaned and relsed the buffer. 0 otherwise */ static int remove_from_transaction(struct super_block *sb, b_blocknr_t blocknr, int already_cleaned) { struct buffer_head *bh; struct reiserfs_journal_cnode *cn; struct reiserfs_journal *journal = SB_JOURNAL(sb); int ret = 0; cn = get_journal_hash_dev(sb, journal->j_hash_table, blocknr); if (!cn || !cn->bh) { return ret; } bh = cn->bh; if (cn->prev) { cn->prev->next = cn->next; } if (cn->next) { cn->next->prev = cn->prev; } if (cn == journal->j_first) { journal->j_first = cn->next; } if (cn == journal->j_last) { journal->j_last = cn->prev; } remove_journal_hash(sb, journal->j_hash_table, NULL, bh->b_blocknr, 0); clear_buffer_journaled(bh); /* don't log this one */ if (!already_cleaned) { clear_buffer_journal_dirty(bh); clear_buffer_dirty(bh); clear_buffer_journal_test(bh); put_bh(bh); if (atomic_read(&bh->b_count) < 0) { reiserfs_warning(sb, "journal-1752", "b_count < 0"); } ret = 1; } journal->j_len--; journal->j_len_alloc--; free_cnode(sb, cn); return ret; } /* * for any cnode in a journal list, it can only be dirtied of all the * transactions that include it are committed to disk. * this checks through each transaction, and returns 1 if you are allowed * to dirty, and 0 if you aren't * * it is called by dirty_journal_list, which is called after * flush_commit_list has gotten all the log blocks for a given * transaction on disk * */ static int can_dirty(struct reiserfs_journal_cnode *cn) { struct super_block *sb = cn->sb; b_blocknr_t blocknr = cn->blocknr; struct reiserfs_journal_cnode *cur = cn->hprev; int can_dirty = 1; /* * first test hprev. These are all newer than cn, so any node here * with the same block number and dev means this node can't be sent * to disk right now. */ while (cur && can_dirty) { if (cur->jlist && cur->bh && cur->blocknr && cur->sb == sb && cur->blocknr == blocknr) { can_dirty = 0; } cur = cur->hprev; } /* * then test hnext. These are all older than cn. 
As long as they * are committed to the log, it is safe to write cn to disk */ cur = cn->hnext; while (cur && can_dirty) { if (cur->jlist && cur->jlist->j_len > 0 && atomic_read(&cur->jlist->j_commit_left) > 0 && cur->bh && cur->blocknr && cur->sb == sb && cur->blocknr == blocknr) { can_dirty = 0; } cur = cur->hnext; } return can_dirty; } /* * syncs the commit blocks, but does not force the real buffers to disk * will wait until the current transaction is done/committed before returning */ int journal_end_sync(struct reiserfs_transaction_handle *th) { struct super_block *sb = th->t_super; struct reiserfs_journal *journal = SB_JOURNAL(sb); BUG_ON(!th->t_trans_id); /* you can sync while nested, very, very bad */ BUG_ON(th->t_refcount > 1); if (journal->j_len == 0) { reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb), 1); journal_mark_dirty(th, SB_BUFFER_WITH_SB(sb)); } return do_journal_end(th, COMMIT_NOW | WAIT); } /* writeback the pending async commits to disk */ static void flush_async_commits(struct work_struct *work) { struct reiserfs_journal *journal = container_of(work, struct reiserfs_journal, j_work.work); struct super_block *sb = journal->j_work_sb; struct reiserfs_journal_list *jl; struct list_head *entry; reiserfs_write_lock(sb); if (!list_empty(&journal->j_journal_list)) { /* last entry is the youngest, commit it and you get everything */ entry = journal->j_journal_list.prev; jl = JOURNAL_LIST_ENTRY(entry); flush_commit_list(sb, jl, 1); } reiserfs_write_unlock(sb); } /* * flushes any old transactions to disk * ends the current transaction if it is too old */ void reiserfs_flush_old_commits(struct super_block *sb) { time64_t now; struct reiserfs_transaction_handle th; struct reiserfs_journal *journal = SB_JOURNAL(sb); now = ktime_get_seconds(); /* * safety check so we don't flush while we are replaying the log during * mount */ if (list_empty(&journal->j_journal_list)) return; /* * check the current transaction. If there are no writers, and it is * too old, finish it, and force the commit blocks to disk */ if (atomic_read(&journal->j_wcount) <= 0 && journal->j_trans_start_time > 0 && journal->j_len > 0 && (now - journal->j_trans_start_time) > journal->j_max_trans_age) { if (!journal_join(&th, sb)) { reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb), 1); journal_mark_dirty(&th, SB_BUFFER_WITH_SB(sb)); /* * we're only being called from kreiserfsd, it makes * no sense to do an async commit so that kreiserfsd * can do it later */ do_journal_end(&th, COMMIT_NOW | WAIT); } } } /* * returns 0 if do_journal_end should return right away, returns 1 if * do_journal_end should finish the commit * * if the current transaction is too old, but still has writers, this will * wait on j_join_wait until all the writers are done. By the time it * wakes up, the transaction it was called has already ended, so it just * flushes the commit list and returns 0. * * Won't batch when flush or commit_now is set. Also won't batch when * others are waiting on j_join_wait. * * Note, we can't allow the journal_end to proceed while there are still * writers in the log. 
*/ static int check_journal_end(struct reiserfs_transaction_handle *th, int flags) { time64_t now; int flush = flags & FLUSH_ALL; int commit_now = flags & COMMIT_NOW; int wait_on_commit = flags & WAIT; struct reiserfs_journal_list *jl; struct super_block *sb = th->t_super; struct reiserfs_journal *journal = SB_JOURNAL(sb); BUG_ON(!th->t_trans_id); if (th->t_trans_id != journal->j_trans_id) { reiserfs_panic(th->t_super, "journal-1577", "handle trans id %ld != current trans id %ld", th->t_trans_id, journal->j_trans_id); } journal->j_len_alloc -= (th->t_blocks_allocated - th->t_blocks_logged); /* <= 0 is allowed. unmounting might not call begin */ if (atomic_read(&journal->j_wcount) > 0) atomic_dec(&journal->j_wcount); /* * BUG, deal with case where j_len is 0, but people previously * freed blocks need to be released will be dealt with by next * transaction that actually writes something, but should be taken * care of in this trans */ BUG_ON(journal->j_len == 0); /* * if wcount > 0, and we are called to with flush or commit_now, * we wait on j_join_wait. We will wake up when the last writer has * finished the transaction, and started it on its way to the disk. * Then, we flush the commit or journal list, and just return 0 * because the rest of journal end was already done for this * transaction. */ if (atomic_read(&journal->j_wcount) > 0) { if (flush || commit_now) { unsigned trans_id; jl = journal->j_current_jl; trans_id = jl->j_trans_id; if (wait_on_commit) jl->j_state |= LIST_COMMIT_PENDING; atomic_set(&journal->j_jlock, 1); if (flush) { journal->j_next_full_flush = 1; } unlock_journal(sb); /* * sleep while the current transaction is * still j_jlocked */ while (journal->j_trans_id == trans_id) { if (atomic_read(&journal->j_jlock)) { queue_log_writer(sb); } else { lock_journal(sb); if (journal->j_trans_id == trans_id) { atomic_set(&journal->j_jlock, 1); } unlock_journal(sb); } } BUG_ON(journal->j_trans_id == trans_id); if (commit_now && journal_list_still_alive(sb, trans_id) && wait_on_commit) { flush_commit_list(sb, jl, 1); } return 0; } unlock_journal(sb); return 0; } /* deal with old transactions where we are the last writers */ now = ktime_get_seconds(); if ((now - journal->j_trans_start_time) > journal->j_max_trans_age) { commit_now = 1; journal->j_next_async_flush = 1; } /* don't batch when someone is waiting on j_join_wait */ /* don't batch when syncing the commit or flushing the whole trans */ if (!(journal->j_must_wait > 0) && !(atomic_read(&journal->j_jlock)) && !flush && !commit_now && (journal->j_len < journal->j_max_batch) && journal->j_len_alloc < journal->j_max_batch && journal->j_cnode_free > (journal->j_trans_max * 3)) { journal->j_bcount++; unlock_journal(sb); return 0; } if (journal->j_start > SB_ONDISK_JOURNAL_SIZE(sb)) { reiserfs_panic(sb, "journal-003", "j_start (%ld) is too high", journal->j_start); } return 1; } /* * Does all the work that makes deleting blocks safe. * when deleting a block mark BH_JNew, just remove it from the current * transaction, clean it's buffer_head and move on. * * otherwise: * set a bit for the block in the journal bitmap. That will prevent it from * being allocated for unformatted nodes before this transaction has finished. * * mark any cnodes for this block as BLOCK_FREED, and clear their bh pointers. * That will prevent any old transactions with this block from trying to flush * to the real location. Since we aren't removing the cnode from the * journal_list_hash, *the block can't be reallocated yet. 
* * Then remove it from the current transaction, decrementing any counters and * filing it on the clean list. */ int journal_mark_freed(struct reiserfs_transaction_handle *th, struct super_block *sb, b_blocknr_t blocknr) { struct reiserfs_journal *journal = SB_JOURNAL(sb); struct reiserfs_journal_cnode *cn = NULL; struct buffer_head *bh = NULL; struct reiserfs_list_bitmap *jb = NULL; int cleaned = 0; BUG_ON(!th->t_trans_id); cn = get_journal_hash_dev(sb, journal->j_hash_table, blocknr); if (cn && cn->bh) { bh = cn->bh; get_bh(bh); } /* if it is journal new, we just remove it from this transaction */ if (bh && buffer_journal_new(bh)) { clear_buffer_journal_new(bh); clear_prepared_bits(bh); reiserfs_clean_and_file_buffer(bh); cleaned = remove_from_transaction(sb, blocknr, cleaned); } else { /* * set the bit for this block in the journal bitmap * for this transaction */ jb = journal->j_current_jl->j_list_bitmap; if (!jb) { reiserfs_panic(sb, "journal-1702", "journal_list_bitmap is NULL"); } set_bit_in_list_bitmap(sb, blocknr, jb); /* Note, the entire while loop is not allowed to schedule. */ if (bh) { clear_prepared_bits(bh); reiserfs_clean_and_file_buffer(bh); } cleaned = remove_from_transaction(sb, blocknr, cleaned); /* * find all older transactions with this block, * make sure they don't try to write it out */ cn = get_journal_hash_dev(sb, journal->j_list_hash_table, blocknr); while (cn) { if (sb == cn->sb && blocknr == cn->blocknr) { set_bit(BLOCK_FREED, &cn->state); if (cn->bh) { /* * remove_from_transaction will brelse * the buffer if it was in the current * trans */ if (!cleaned) { clear_buffer_journal_dirty(cn-> bh); clear_buffer_dirty(cn->bh); clear_buffer_journal_test(cn-> bh); cleaned = 1; put_bh(cn->bh); if (atomic_read (&cn->bh->b_count) < 0) { reiserfs_warning(sb, "journal-2138", "cn->bh->b_count < 0"); } } /* * since we are clearing the bh, * we MUST dec nonzerolen */ if (cn->jlist) { atomic_dec(&cn->jlist-> j_nonzerolen); } cn->bh = NULL; } } cn = cn->hnext; } } if (bh) release_buffer_page(bh); /* get_hash grabs the buffer */ return 0; } void reiserfs_update_inode_transaction(struct inode *inode) { struct reiserfs_journal *journal = SB_JOURNAL(inode->i_sb); REISERFS_I(inode)->i_jl = journal->j_current_jl; REISERFS_I(inode)->i_trans_id = journal->j_trans_id; } /* * returns -1 on error, 0 if no commits/barriers were done and 1 * if a transaction was actually committed and the barrier was done */ static int __commit_trans_jl(struct inode *inode, unsigned long id, struct reiserfs_journal_list *jl) { struct reiserfs_transaction_handle th; struct super_block *sb = inode->i_sb; struct reiserfs_journal *journal = SB_JOURNAL(sb); int ret = 0; /* * is it from the current transaction, * or from an unknown transaction? */ if (id == journal->j_trans_id) { jl = journal->j_current_jl; /* * try to let other writers come in and * grow this transaction */ let_transaction_grow(sb, id); if (journal->j_trans_id != id) { goto flush_commit_only; } ret = journal_begin(&th, sb, 1); if (ret) return ret; /* someone might have ended this transaction while we joined */ if (journal->j_trans_id != id) { reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb), 1); journal_mark_dirty(&th, SB_BUFFER_WITH_SB(sb)); ret = journal_end(&th); goto flush_commit_only; } ret = journal_end_sync(&th); if (!ret) ret = 1; } else { /* * this gets tricky, we have to make sure the journal list in * the inode still exists. 
We know the list is still around * if we've got a larger transaction id than the oldest list */ flush_commit_only: if (journal_list_still_alive(inode->i_sb, id)) { /* * we only set ret to 1 when we know for sure * the barrier hasn't been started yet on the commit * block. */ if (atomic_read(&jl->j_commit_left) > 1) ret = 1; flush_commit_list(sb, jl, 1); if (journal->j_errno) ret = journal->j_errno; } } /* otherwise the list is gone, and long since committed */ return ret; } int reiserfs_commit_for_inode(struct inode *inode) { unsigned int id = REISERFS_I(inode)->i_trans_id; struct reiserfs_journal_list *jl = REISERFS_I(inode)->i_jl; /* * for the whole inode, assume unset id means it was * changed in the current transaction. More conservative */ if (!id || !jl) { reiserfs_update_inode_transaction(inode); id = REISERFS_I(inode)->i_trans_id; /* jl will be updated in __commit_trans_jl */ } return __commit_trans_jl(inode, id, jl); } void reiserfs_restore_prepared_buffer(struct super_block *sb, struct buffer_head *bh) { struct reiserfs_journal *journal = SB_JOURNAL(sb); PROC_INFO_INC(sb, journal.restore_prepared); if (!bh) { return; } if (test_clear_buffer_journal_restore_dirty(bh) && buffer_journal_dirty(bh)) { struct reiserfs_journal_cnode *cn; reiserfs_write_lock(sb); cn = get_journal_hash_dev(sb, journal->j_list_hash_table, bh->b_blocknr); if (cn && can_dirty(cn)) { set_buffer_journal_test(bh); mark_buffer_dirty(bh); } reiserfs_write_unlock(sb); } clear_buffer_journal_prepared(bh); } extern struct tree_balance *cur_tb; /* * before we can change a metadata block, we have to make sure it won't * be written to disk while we are altering it. So, we must: * clean it * wait on it. */ int reiserfs_prepare_for_journal(struct super_block *sb, struct buffer_head *bh, int wait) { PROC_INFO_INC(sb, journal.prepare); if (!trylock_buffer(bh)) { if (!wait) return 0; lock_buffer(bh); } set_buffer_journal_prepared(bh); if (test_clear_buffer_dirty(bh) && buffer_journal_dirty(bh)) { clear_buffer_journal_test(bh); set_buffer_journal_restore_dirty(bh); } unlock_buffer(bh); return 1; } /* * long and ugly. If flush, will not return until all commit * blocks and all real buffers in the trans are on disk. * If no_async, won't return until all commit blocks are on disk. * * keep reading, there are comments as you go along * * If the journal is aborted, we just clean up. Things like flushing * journal lists, etc just won't happen. */ static int do_journal_end(struct reiserfs_transaction_handle *th, int flags) { struct super_block *sb = th->t_super; struct reiserfs_journal *journal = SB_JOURNAL(sb); struct reiserfs_journal_cnode *cn, *next, *jl_cn; struct reiserfs_journal_cnode *last_cn = NULL; struct reiserfs_journal_desc *desc; struct reiserfs_journal_commit *commit; struct buffer_head *c_bh; /* commit bh */ struct buffer_head *d_bh; /* desc bh */ int cur_write_start = 0; /* start index of current log write */ int i; int flush; int wait_on_commit; struct reiserfs_journal_list *jl, *temp_jl; struct list_head *entry, *safe; unsigned long jindex; unsigned int commit_trans_id; int trans_half; int depth; BUG_ON(th->t_refcount > 1); BUG_ON(!th->t_trans_id); BUG_ON(!th->t_super); /* * protect flush_older_commits from doing mistakes if the * transaction ID counter gets overflowed. 
*/ if (th->t_trans_id == ~0U) flags |= FLUSH_ALL | COMMIT_NOW | WAIT; flush = flags & FLUSH_ALL; wait_on_commit = flags & WAIT; current->journal_info = th->t_handle_save; reiserfs_check_lock_depth(sb, "journal end"); if (journal->j_len == 0) { reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb), 1); journal_mark_dirty(th, SB_BUFFER_WITH_SB(sb)); } lock_journal(sb); if (journal->j_next_full_flush) { flags |= FLUSH_ALL; flush = 1; } if (journal->j_next_async_flush) { flags |= COMMIT_NOW | WAIT; wait_on_commit = 1; } /* * check_journal_end locks the journal, and unlocks if it does * not return 1 it tells us if we should continue with the * journal_end, or just return */ if (!check_journal_end(th, flags)) { reiserfs_schedule_old_flush(sb); wake_queued_writers(sb); reiserfs_async_progress_wait(sb); goto out; } /* check_journal_end might set these, check again */ if (journal->j_next_full_flush) { flush = 1; } /* * j must wait means we have to flush the log blocks, and the * real blocks for this transaction */ if (journal->j_must_wait > 0) { flush = 1; } #ifdef REISERFS_PREALLOCATE /* * quota ops might need to nest, setup the journal_info pointer * for them and raise the refcount so that it is > 0. */ current->journal_info = th; th->t_refcount++; /* it should not involve new blocks into the transaction */ reiserfs_discard_all_prealloc(th); th->t_refcount--; current->journal_info = th->t_handle_save; #endif /* setup description block */ d_bh = journal_getblk(sb, SB_ONDISK_JOURNAL_1st_BLOCK(sb) + journal->j_start); set_buffer_uptodate(d_bh); desc = (struct reiserfs_journal_desc *)(d_bh)->b_data; memset(d_bh->b_data, 0, d_bh->b_size); memcpy(get_journal_desc_magic(d_bh), JOURNAL_DESC_MAGIC, 8); set_desc_trans_id(desc, journal->j_trans_id); /* * setup commit block. Don't write (keep it clean too) this one * until after everyone else is written */ c_bh = journal_getblk(sb, SB_ONDISK_JOURNAL_1st_BLOCK(sb) + ((journal->j_start + journal->j_len + 1) % SB_ONDISK_JOURNAL_SIZE(sb))); commit = (struct reiserfs_journal_commit *)c_bh->b_data; memset(c_bh->b_data, 0, c_bh->b_size); set_commit_trans_id(commit, journal->j_trans_id); set_buffer_uptodate(c_bh); /* init this journal list */ jl = journal->j_current_jl; /* * we lock the commit before doing anything because * we want to make sure nobody tries to run flush_commit_list until * the new transaction is fully setup, and we've already flushed the * ordered bh list */ reiserfs_mutex_lock_safe(&jl->j_commit_mutex, sb); /* save the transaction id in case we need to commit it later */ commit_trans_id = jl->j_trans_id; atomic_set(&jl->j_older_commits_done, 0); jl->j_trans_id = journal->j_trans_id; jl->j_timestamp = journal->j_trans_start_time; jl->j_commit_bh = c_bh; jl->j_start = journal->j_start; jl->j_len = journal->j_len; atomic_set(&jl->j_nonzerolen, journal->j_len); atomic_set(&jl->j_commit_left, journal->j_len + 2); jl->j_realblock = NULL; /* * The ENTIRE FOR LOOP MUST not cause schedule to occur. 
* for each real block, add it to the journal list hash, * copy into real block index array in the commit or desc block */ trans_half = journal_trans_half(sb->s_blocksize); for (i = 0, cn = journal->j_first; cn; cn = cn->next, i++) { if (buffer_journaled(cn->bh)) { jl_cn = get_cnode(sb); if (!jl_cn) { reiserfs_panic(sb, "journal-1676", "get_cnode returned NULL"); } if (i == 0) { jl->j_realblock = jl_cn; } jl_cn->prev = last_cn; jl_cn->next = NULL; if (last_cn) { last_cn->next = jl_cn; } last_cn = jl_cn; /* * make sure the block we are trying to log * is not a block of journal or reserved area */ if (is_block_in_log_or_reserved_area (sb, cn->bh->b_blocknr)) { reiserfs_panic(sb, "journal-2332", "Trying to log block %lu, " "which is a log block", cn->bh->b_blocknr); } jl_cn->blocknr = cn->bh->b_blocknr; jl_cn->state = 0; jl_cn->sb = sb; jl_cn->bh = cn->bh; jl_cn->jlist = jl; insert_journal_hash(journal->j_list_hash_table, jl_cn); if (i < trans_half) { desc->j_realblock[i] = cpu_to_le32(cn->bh->b_blocknr); } else { commit->j_realblock[i - trans_half] = cpu_to_le32(cn->bh->b_blocknr); } } else { i--; } } set_desc_trans_len(desc, journal->j_len); set_desc_mount_id(desc, journal->j_mount_id); set_desc_trans_id(desc, journal->j_trans_id); set_commit_trans_len(commit, journal->j_len); /* * special check in case all buffers in the journal * were marked for not logging */ BUG_ON(journal->j_len == 0); /* * we're about to dirty all the log blocks, mark the description block * dirty now too. Don't mark the commit block dirty until all the * others are on disk */ mark_buffer_dirty(d_bh); /* * first data block is j_start + 1, so add one to * cur_write_start wherever you use it */ cur_write_start = journal->j_start; cn = journal->j_first; jindex = 1; /* start at one so we don't get the desc again */ while (cn) { clear_buffer_journal_new(cn->bh); /* copy all the real blocks into log area. dirty log blocks */ if (buffer_journaled(cn->bh)) { struct buffer_head *tmp_bh; char *addr; struct page *page; tmp_bh = journal_getblk(sb, SB_ONDISK_JOURNAL_1st_BLOCK(sb) + ((cur_write_start + jindex) % SB_ONDISK_JOURNAL_SIZE(sb))); set_buffer_uptodate(tmp_bh); page = cn->bh->b_page; addr = kmap(page); memcpy(tmp_bh->b_data, addr + offset_in_page(cn->bh->b_data), cn->bh->b_size); kunmap(page); mark_buffer_dirty(tmp_bh); jindex++; set_buffer_journal_dirty(cn->bh); clear_buffer_journaled(cn->bh); } else { /* * JDirty cleared sometime during transaction. * don't log this one */ reiserfs_warning(sb, "journal-2048", "BAD, buffer in journal hash, " "but not JDirty!"); brelse(cn->bh); } next = cn->next; free_cnode(sb, cn); cn = next; reiserfs_cond_resched(sb); } /* * we are done with both the c_bh and d_bh, but * c_bh must be written after all other commit blocks, * so we dirty/relse c_bh in flush_commit_list, with commit_left <= 1. 
*/ journal->j_current_jl = alloc_journal_list(sb); /* now it is safe to insert this transaction on the main list */ list_add_tail(&jl->j_list, &journal->j_journal_list); list_add_tail(&jl->j_working_list, &journal->j_working_list); journal->j_num_work_lists++; /* reset journal values for the next transaction */ journal->j_start = (journal->j_start + journal->j_len + 2) % SB_ONDISK_JOURNAL_SIZE(sb); atomic_set(&journal->j_wcount, 0); journal->j_bcount = 0; journal->j_last = NULL; journal->j_first = NULL; journal->j_len = 0; journal->j_trans_start_time = 0; /* check for trans_id overflow */ if (++journal->j_trans_id == 0) journal->j_trans_id = 10; journal->j_current_jl->j_trans_id = journal->j_trans_id; journal->j_must_wait = 0; journal->j_len_alloc = 0; journal->j_next_full_flush = 0; journal->j_next_async_flush = 0; init_journal_hash(sb); /* * make sure reiserfs_add_jh sees the new current_jl before we * write out the tails */ smp_mb(); /* * tail conversion targets have to hit the disk before we end the * transaction. Otherwise a later transaction might repack the tail * before this transaction commits, leaving the data block unflushed * and clean, if we crash before the later transaction commits, the * data block is lost. */ if (!list_empty(&jl->j_tail_bh_list)) { depth = reiserfs_write_unlock_nested(sb); write_ordered_buffers(&journal->j_dirty_buffers_lock, journal, jl, &jl->j_tail_bh_list); reiserfs_write_lock_nested(sb, depth); } BUG_ON(!list_empty(&jl->j_tail_bh_list)); mutex_unlock(&jl->j_commit_mutex); /* * honor the flush wishes from the caller, simple commits can * be done outside the journal lock, they are done below * * if we don't flush the commit list right now, we put it into * the work queue so the people waiting on the async progress work * queue don't wait for this proc to flush journal lists and such. */ if (flush) { flush_commit_list(sb, jl, 1); flush_journal_list(sb, jl, 1); } else if (!(jl->j_state & LIST_COMMIT_PENDING)) { /* * Avoid queueing work when sb is being shut down. Transaction * will be flushed on journal shutdown. */ if (sb->s_flags & SB_ACTIVE) queue_delayed_work(REISERFS_SB(sb)->commit_wq, &journal->j_work, HZ / 10); } /* * if the next transaction has any chance of wrapping, flush * transactions that might get overwritten. If any journal lists * are very old flush them as well. */ first_jl: list_for_each_safe(entry, safe, &journal->j_journal_list) { temp_jl = JOURNAL_LIST_ENTRY(entry); if (journal->j_start <= temp_jl->j_start) { if ((journal->j_start + journal->j_trans_max + 1) >= temp_jl->j_start) { flush_used_journal_lists(sb, temp_jl); goto first_jl; } else if ((journal->j_start + journal->j_trans_max + 1) < SB_ONDISK_JOURNAL_SIZE(sb)) { /* * if we don't cross into the next * transaction and we don't wrap, there is * no way we can overlap any later transactions * break now */ break; } } else if ((journal->j_start + journal->j_trans_max + 1) > SB_ONDISK_JOURNAL_SIZE(sb)) { if (((journal->j_start + journal->j_trans_max + 1) % SB_ONDISK_JOURNAL_SIZE(sb)) >= temp_jl->j_start) { flush_used_journal_lists(sb, temp_jl); goto first_jl; } else { /* * we don't overlap anything from out start * to the end of the log, and our wrapped * portion doesn't overlap anything at * the start of the log. 
We can break */ break; } } } journal->j_current_jl->j_list_bitmap = get_list_bitmap(sb, journal->j_current_jl); if (!(journal->j_current_jl->j_list_bitmap)) { reiserfs_panic(sb, "journal-1996", "could not get a list bitmap"); } atomic_set(&journal->j_jlock, 0); unlock_journal(sb); /* wake up any body waiting to join. */ clear_bit(J_WRITERS_QUEUED, &journal->j_state); wake_up(&journal->j_join_wait); if (!flush && wait_on_commit && journal_list_still_alive(sb, commit_trans_id)) { flush_commit_list(sb, jl, 1); } out: reiserfs_check_lock_depth(sb, "journal end2"); memset(th, 0, sizeof(*th)); /* * Re-set th->t_super, so we can properly keep track of how many * persistent transactions there are. We need to do this so if this * call is part of a failed restart_transaction, we can free it later */ th->t_super = sb; return journal->j_errno; } /* Send the file system read only and refuse new transactions */ void reiserfs_abort_journal(struct super_block *sb, int errno) { struct reiserfs_journal *journal = SB_JOURNAL(sb); if (test_bit(J_ABORTED, &journal->j_state)) return; if (!journal->j_errno) journal->j_errno = errno; sb->s_flags |= SB_RDONLY; set_bit(J_ABORTED, &journal->j_state); #ifdef CONFIG_REISERFS_CHECK dump_stack(); #endif }
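The predicate that decides when a running transaction must stop growing appears twice in the journal code above, once in journal_transaction_should_end and again (with join handling) in check_journal_end. The standalone sketch below restates that heuristic outside the kernel so the thresholds are easy to see. It is a minimal illustration, not kernel code: the journal_state struct and the sample numbers are stand-ins for the corresponding struct reiserfs_journal fields, and all locking is omitted.

/*
 * Sketch of the "should this transaction end?" heuristic used by
 * journal_transaction_should_end()/check_journal_end() above.  The
 * struct stands in for the struct reiserfs_journal fields the real
 * code reads; no locking is modeled.
 */
#include <stdio.h>
#include <time.h>

struct journal_state {
	int must_wait;      /* a flush has been requested           */
	int jlocked;        /* j_jlock: a commit is being forced    */
	long len_alloc;     /* blocks reserved by open handles      */
	long max_batch;     /* j_max_batch                          */
	long cnode_free;    /* free journal cnodes remaining        */
	long trans_max;     /* j_trans_max                          */
	time_t trans_start; /* j_trans_start_time                   */
	long max_trans_age; /* j_max_trans_age, in seconds          */
};

/*
 * Returns 1 when a handle asking for new_alloc more blocks should end
 * the current transaction instead of growing it, mirroring the five
 * conditions tested in journal_transaction_should_end().
 */
static int transaction_should_end(const struct journal_state *j,
				  long new_alloc, time_t now)
{
	if (j->must_wait > 0)
		return 1;	/* someone is waiting for a flush */
	if (j->len_alloc + new_alloc >= j->max_batch)
		return 1;	/* the batch is full              */
	if (j->jlocked)
		return 1;	/* a commit was already forced    */
	if (now - j->trans_start > j->max_trans_age)
		return 1;	/* the transaction is too old     */
	if (j->cnode_free < j->trans_max * 3)
		return 1;	/* the cnode pool is running low  */
	return 0;
}

int main(void)
{
	struct journal_state j = {
		.must_wait = 0, .jlocked = 0,
		.len_alloc = 890, .max_batch = 900,
		.cnode_free = 4096, .trans_max = 256,
		.trans_start = time(NULL), .max_trans_age = 30,
	};
	/* 890 + 16 >= 900, so the batch-full rule fires. */
	printf("should end: %d\n",
	       transaction_should_end(&j, 16, time(NULL)));
	return 0;
}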
Pope Francis: Kiss the Crucifix; Kiss the Wounds of Christ

The Holy Father discussed the triumph of the cross at his April 16 general audience.

ELISE HARRIS/CNA/EWTN NEWS

VATICAN CITY — In his general audience address this week, Pope Francis spoke on the meaning of suffering and evil, explaining that it is a mystery which finds its answer in the passion and death of Jesus, who endured it for each of us.

“This week, it will do good for us all to look to the crucifix, kissing the wounds of Jesus, kissing the crucifix. He has taken upon himself the whole of human suffering,” the Holy Father expressed April 16.

Speaking to the thousands gathered for his weekly address, the Pope began by drawing attention to the day’s Gospel reading, which recounts the betrayal of Judas, noting that this event marks the beginning of Christ’s passion.

With his death on the cross, “Jesus reaches complete humiliation,” the Pope observed, highlighting how “it involved the worst death; that which was reserved for slaves and criminals,” and that although “Jesus was considered a prophet,” he “died as a criminal.”

“Looking at Jesus in his passion, we see, as in a mirror, also the suffering of all humanity and find the divine answer to the mystery of evil, of suffering, of death,” he continued.

He noted, “Many times, we experience horror in the face of the evil and suffering that surrounds us, and we ask: Why does God permit it? It’s a deep wound for us to see suffering and death, especially that of the innocent.”

This wound especially stings “when we see children suffering. … It’s a wound in the heart. It’s the mystery of evil,” he said, “and Jesus takes all this evil, all this suffering, upon himself.”

Oftentimes, we believe that “God, in his omnipotence, will defeat injustice, evil, sin and suffering with a triumphant divine victory,” the Holy Father said; however, instead, he shows us “a humble victory that seems like a human failure to us.”

“We can say: God wins precisely in failure. The Son of God, in fact, appears on the cross as a defeated man. He suffers, is betrayed, is scorned and finally dies.”

Drawing attention to how “Jesus permits that evil crosses the line with him, and takes it upon himself to conquer it,” the Pope emphasized that “his passion is not an accident; his death — that death — was ‘written.’”

Referring to “the mystery of the great humility of God,” Pope Francis observed, “Really, we don’t have many explanations; it’s a puzzling mystery: ‘For God has so loved the world that he gave his only Son.’”

“This week, we think so much of the pain of Jesus,” he stated, “and we tell ourselves: ‘This is for me. Even if I had been the only person in the world, he would have done it.’

“‘He did it for me.’ And we kiss the crucifix and say: ‘For me. Thank you, Jesus. For me.’”

“And when all seems lost, when there is no one left because they will strike ‘the Shepherd, and the sheep of the flock will be scattered,’” he concluded, “it is then that God intervenes with the power of the Resurrection.”
// build/tmp/expandedArchives/forge-1.17.1-37.0.58_mapped_official_1.17.1-sources.jar_461b1baaba5fdaecf94c73039d52c00b/net/minecraft/client/gui/screens/Overlay.java
package net.minecraft.client.gui.screens;

import net.minecraft.client.gui.GuiComponent;
import net.minecraft.client.gui.components.Widget;
import net.minecraftforge.api.distmarker.Dist;
import net.minecraftforge.api.distmarker.OnlyIn;

@OnlyIn(Dist.CLIENT)
public abstract class Overlay extends GuiComponent implements Widget {
   public boolean isPauseScreen() {
      return true;
   }
}
//! Test sequences with immediate commands followed by a marker
class Tester : public ImmediateBase::Tester {

  public:

    Tester(
        const SequenceFiles::File::Format::t format =
            SequenceFiles::File::Format::F_PRIME
    );

  public:

    void NeverLoaded();
    void FileErrors();
    void LoadRunRun();
    void UnexpectedCommandResponse();
    void NewSequence();
    void Manual();
    void FailedCommands();
    void AutoByCommand();
    void AutoByPort();
    void InvalidManualCommands();
    void LoadOnInit();
    void SequenceTimeout();
    void Cancel();
    void Validate();

  private:

    void executeCommandsManual(
        const char *const fileName,
        const U32 numCommands
    );

};
// modules/dop/component-protocol/components/auto-test-scenes/rightPage/fileDetail/fileExecute/executeTaskBreadcrumb/render.go
// Copyright (c) 2021 Terminus, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package executeTaskBreadcrumb

import (
	"context"
	"encoding/json"
	"fmt"
	"strconv"

	"github.com/sirupsen/logrus"

	"github.com/erda-project/erda-infra/base/servicehub"
	"github.com/erda-project/erda-infra/providers/component-protocol/cptype"
	"github.com/erda-project/erda-infra/providers/component-protocol/utils/cputil"
	"github.com/erda-project/erda/apistructs"
	"github.com/erda-project/erda/bundle"
	"github.com/erda-project/erda/modules/dop/component-protocol/components/auto-test-scenes/common/gshelper"
	"github.com/erda-project/erda/modules/dop/component-protocol/types"
	"github.com/erda-project/erda/modules/openapi/component-protocol/components/base"
)

type ComponentAction struct {
	base.DefaultProvider

	sdk      *cptype.SDK
	bdl      *bundle.Bundle
	gsHelper *gshelper.GSHelper

	Type       string                 `json:"type"`
	Props      map[string]interface{} `json:"props"`
	State      state                  `json:"state"`
	Operations map[string]interface{} `json:"operations"`
	Data       map[string]interface{} `json:"data"`

	pipelineIDFromExecuteHistoryTable   uint64
	pipelineIDFromExecuteTaskBreadcrumb uint64
	visible                             bool
	sceneID                             uint64
}

type state struct {
	Name   string `json:"name"`
	Unfold bool   `json:"unfold"`
}

type Breadcrumb struct {
	Key  string `json:"key"`
	Item string `json:"item"`
}

func (a *ComponentAction) Import(c *cptype.Component) error {
	b, err := json.Marshal(c)
	if err != nil {
		return err
	}
	if err := json.Unmarshal(b, a); err != nil {
		return err
	}
	return nil
}

func init() {
	base.InitProviderWithCreator("auto-test-scenes", "executeTaskBreadcrumb", func() servicehub.Provider {
		return &ComponentAction{}
	})
}

func (a *ComponentAction) Render(ctx context.Context, c *cptype.Component, scenario cptype.Scenario, event cptype.ComponentEvent, gs *cptype.GlobalStateData) error {
	gh := gshelper.NewGSHelper(gs)
	a.Type = "Breadcrumb"
	if err := a.Import(c); err != nil {
		logrus.Errorf("import component failed, err:%v", err)
		return err
	}

	a.pipelineIDFromExecuteHistoryTable = gh.GetExecuteHistoryTablePipelineID()
	a.sceneID = gh.GetFileTreeSceneID()
	a.sdk = cputil.SDK(ctx)
	a.bdl = ctx.Value(types.GlobalCtxKeyBundle).(*bundle.Bundle)
	a.gsHelper = gshelper.NewGSHelper(gs)
	if a.pipelineIDFromExecuteHistoryTable == 0 {
		c.Data = map[string]interface{}{}
		c.State = map[string]interface{}{"visible": true}
		return nil
	}

	// listen on operation
	switch event.Operation {
	case cptype.RenderingOperation, cptype.InitializeOperation:
		setID := a.gsHelper.GetGlobalSelectedSetID()
		if a.sceneID == 0 && setID == 0 {
			return nil
		}
		res := []Breadcrumb{}
		if !a.State.Unfold {
			if a.sceneID != 0 {
				req := apistructs.AutotestSceneRequest{SceneID: a.sceneID}
				req.UserID = a.sdk.Identity.UserID
				scene, err := a.bdl.GetAutoTestScene(req)
				if err != nil {
					logrus.Errorf("get autoTestScene failed, err: %v", err)
					return err
				}
				a.State.Name = scene.Name
			}
			if setID != 0 {
				req := 
apistructs.SceneSetRequest{SetID: setID} req.UserID = a.sdk.Identity.UserID sceneSet, err := a.bdl.GetSceneSet(req) if err != nil { logrus.Errorf("get autoTestSceneSet failed, err: %v", err) return err } a.State.Name = sceneSet.Name } } else { b, err := json.Marshal(a.Data["list"]) if err != nil { return err } if err := json.Unmarshal(b, &res); err != nil { return err } } a.State.Unfold = false fmt.Println("name: ", a.State.Name, " pipelineID: ", a.pipelineIDFromExecuteHistoryTable) res = append(res, Breadcrumb{ Key: strconv.FormatUint(a.pipelineIDFromExecuteHistoryTable, 10), Item: a.State.Name, }) a.visible = len(res) < 2 a.Data = map[string]interface{}{"list": res} case cptype.OperationKey(apistructs.ExecuteTaskBreadcrumbSelectItem): res := []Breadcrumb{} b, err := json.Marshal(a.Data["list"]) if err != nil { return err } if err := json.Unmarshal(b, &res); err != nil { return err } ret := struct { Meta Breadcrumb `json:"meta"` }{} b, err = json.Marshal(event.OperationData) if err != nil { return err } if err := json.Unmarshal(b, &ret); err != nil { return err } lists := []Breadcrumb{} for _, each := range res { lists = append(lists, each) if each.Key == ret.Meta.Key { break } } a.visible = len(lists) < 2 a.Data = map[string]interface{}{"list": lists} a.pipelineIDFromExecuteTaskBreadcrumb, err = strconv.ParseUint(ret.Meta.Key, 10, 64) if err != nil { logrus.Errorf("pipelineId ParseUint failed, err: %v", err) return err } gh.SetExecuteHistoryTablePipelineID(a.pipelineIDFromExecuteTaskBreadcrumb) } c.Operations = getOperations() c.Data = a.Data var err error defer func() { fail := a.marshal(c) if err == nil && fail != nil { err = fail } }() // set global state gh.SetExecuteTaskBreadcrumbPipelineID(a.pipelineIDFromExecuteTaskBreadcrumb) gh.SetExecuteTaskBreadcrumbVisible(a.visible) return nil } func (a *ComponentAction) marshal(c *cptype.Component) error { stateValue, err := json.Marshal(a.State) if err != nil { return err } var state map[string]interface{} err = json.Unmarshal(stateValue, &state) if err != nil { return err } propValue, err := json.Marshal(a.Props) if err != nil { return err } var props interface{} err = json.Unmarshal(propValue, &props) if err != nil { return err } c.Props = props c.State = state c.Type = a.Type return nil } func getOperations() map[string]interface{} { return map[string]interface{}{ "click": map[string]interface{}{ "key": "selectItem", "reload": true, "fillMeta": "key", "meta": map[string]interface{}{"key": ""}, }, } } func (ca *ComponentAction) setData() error { rsp, err := ca.bdl.GetPipeline(ca.pipelineIDFromExecuteTaskBreadcrumb) if err != nil { return err } lists := []Breadcrumb{} for _, each := range rsp.PipelineStages { list := Breadcrumb{ Key: strconv.FormatUint(each.ID, 10), Item: each.Name, } lists = append(lists, list) } ca.Data = map[string]interface{}{"list": lists} return nil }
// code/src/OSFramework/Feature/IOffset.ts
// eslint-disable-next-line @typescript-eslint/no-unused-vars
namespace OSFramework.Feature {
    export interface IOffset {
        /**
         * Gets the current Offset of the Map.
         * Returns the OffsetX (vertical) and OffsetY (horizontal), in pixels, applied to the map based on the defined Location.
         */
        getOffset: OSStructures.OSMap.Offset;

        /**
         * Sets the current Offset of the Map.
         * Sets the OffsetX (vertical) and OffsetY (horizontal), in pixels, to apply to the map based on the defined Location.
         * @param value Structure containing the OffsetX and OffsetY. (E.g. {OffsetX: 100, OffsetY: 100})
         */
        setOffset(value: OSStructures.OSMap.Offset): void;
    }
}
In a victory for financial reformers who want to lessen Wall Street’s influence on regulatory policy, the White House on Tuesday nominated Lisa Fairfax, a law professor from George Washington University, to fill an open Democratic seat on the Securities and Exchange Commission vacated by current Commissioner Luis Aguilar.

The administration opted for an academic recommended by Sen. Sherrod Brown, D-Ohio, and favored by Sen. Elizabeth Warren, D-Mass., over their initial choice, Keir Gumbs, a corporate lawyer with the high-powered firm Covington & Burling, which represents numerous financial institutions. The selection is an affirmation of Warren’s strategy of making it difficult and painful for Obama to pick nominees with ties to Wall Street.

Gumbs, a former SEC staffer, allegedly gave CEOs tutorials on how to avoid disclosing their corporate political spending while at Covington. He also represented the American Petroleum Institute before the SEC. He was seen as a corporate-friendly ally for Chair Mary Jo White, who would help marginalize the more reform-minded Democrat on the panel, Kara Stein. That’s why White lobbied to replace Aguilar with a moderate. But activist groups like Rootstrikers and Public Citizen whipped up opposition to Gumbs, leading to the White House agreeing to vet additional candidates without ties to corporate America. Word of Fairfax’s potential appointment leaked in September.

Fairfax is an expert on shareholder activism: the efforts by investors to force changes in corporate behavior after acquiring large blocks of shares. Warren included Fairfax on a list of potential candidates for the SEC vacancy, though Brown ultimately made the recommendation. If she proves a strong advocate for reform and an ally to Kara Stein, it puts White on the hot seat and will make her plans to weaken the commission less tenable.

The Obama administration paired the selection of Fairfax with Hester Peirce, a former aide to Sen. Richard Shelby, R-Ala., who would replace outgoing Republican commissioner Daniel Gallagher. The package deal is likely to smooth confirmation.

Financial reformers see blocking Gumbs and getting Fairfax onto the board as a triumph. Rootstrikers, among the leading groups opposing Gumbs, tweeted, “It’s official — next SEC nominee is not another Wall Street insider, thanks to grassroots pressure!”

Denying Gumbs the SEC appointment, against the preferences of the White House, is the latest in a concerted campaign to increase the diversity of perspectives at financial regulators — and in particular to end the “revolving door” between Washington and Wall Street. The philosophy can be summed up with one phrase: “Personnel is policy.” Who gets to implement and enforce laws, or exercise independent judgment, has an enormous bearing on the ultimate outcomes. An inside/outside strategy, with politicians and activist groups forming a united front, makes it more difficult for current and future Democratic administrations to populate their staffs by picking from the Wall Street talent pool. Regardless of individual views or the desires of Wall Street-aligned donors, if presidents can’t get their picks through the Senate, they’ll take the path of least resistance, and open up their personnel to a less corporate-friendly point of view.

Reacting to the news, Warren spokesperson Lacey Rose said, “Senator Warren looks forward to meeting with the nominees and discussing their views on how the SEC can better protect investors and the safety of our financial markets.”
package testdata import ( "testing" "github.com/stretchr/testify/assert" ) func TestFileEmbedding(t *testing.T) { _, err := dataFilesRoot.ReadFile("data-files/README.md") assert.NoError(t, err) files, err := dataFilesRoot.ReadDir("data-files/server-side-eval") assert.NoError(t, err) assert.NotEqual(t, 0, len(files)) }
Palette-Based Image Recoloring Using Color Decomposition Optimization Previous works on palette-based color manipulation typically fail to produce visually pleasing results with vivid color and natural appearance. In this paper, we present an approach to edit colors of an image by adjusting a compact color palette. Different from existing methods that fail to preserve inherent color characteristics residing in the source image, we propose a color decomposition optimization for flexible recoloring while retaining these characteristics. For an input image, we first employ a variant of the $k$ -means algorithm to create a palette consisting of a small set of most representative colors. Next, we propose a color decomposition optimization to decompose colors of the entire image into linear combinations of basis colors in the palette. The captured linear relationships then allow us to recolor the image by recombining the coding coefficients with a user-modified palette. Qualitative comparisons with existing methods show that our approach can more effectively recolor images. Further user study quantitatively demonstrates that our method is a good candidate for color manipulation tasks. In addition, we showcase some applications enabled by our method, including pattern colorings suggesting, color transfer, tissue staining analysis and color image segmentation.
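As a rough illustration of the pipeline the abstract describes (k-means palette extraction, per-pixel decomposition into palette weights, then recoloring with an edited palette), here is a simplified sketch. The unconstrained least-squares weighting below is only a stand-in for the paper's actual color decomposition optimization, and all function names, as well as the use of numpy/scikit-learn, are illustrative assumptions:

```python
import numpy as np
from sklearn.cluster import KMeans

def extract_palette(image, k=5):
    """Cluster pixel colors with k-means; the cluster centers form the palette."""
    pixels = image.reshape(-1, 3).astype(np.float64)
    return KMeans(n_clusters=k, n_init=10).fit(pixels).cluster_centers_

def decompose(pixels, palette):
    """Least-squares weights expressing each pixel as a mix of palette colors.
    (Stand-in for the paper's constrained color decomposition optimization.)"""
    w, *_ = np.linalg.lstsq(palette.T, pixels.T, rcond=None)
    return w.T  # shape: (num_pixels, k)

def recolor(image, palette, edited_palette):
    """Recombine the captured coefficients with a user-modified palette."""
    pixels = image.reshape(-1, 3).astype(np.float64)
    weights = decompose(pixels, palette)
    out = weights @ edited_palette  # same weights, new basis colors
    return np.clip(out, 0, 255).reshape(image.shape).astype(np.uint8)
```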
//----------------------------------------------------------- // verifyConstTypes // Verify that the type is a type we support //----------------------------------------------------------- static bool verifyConstTypes( DWORD vt ) { switch ( vt ) { case VT_UI8: case VT_I8: case VT_I4: case VT_UI1: case VT_I2: case VT_R4: case VT_R8: case VT_BOOL: case VT_DATE: case VT_BSTR: case VT_I1: case VT_UI2: case VT_UI4: case VT_INT: case VT_UINT: case VT_DECIMAL: return true; } return false; }
@inactive_status.setter  # property setter; decorator assumed from the surrounding generated client
def inactive_status(self, inactive_status):
    """Sets the inactive status of this object."""
    self._inactive_status = inactive_status
// Decompiled by Jad v1.5.8g. Copyright 2001 <NAME>.
// Jad home page: http://www.kpdus.com/jad.html
// Decompiler options: packimports(3) safe

package com.google.android.gms.internal.ads;

import java.lang.reflect.Constructor;
import java.util.*;

// Referenced classes of package com.google.android.gms.internal.ads:
//     avh, atb, arh, atv, atd, aqq, ati, avf,
//     asr, awf, aru, arx, avm, atq, ash, asn, aun

final class aup
{

    static int a(int i1, Object obj, aun aun)
    {
        if (obj instanceof atb)
            return arh.a(i1, (atb)obj);
        else
            return arh.b(i1, (atv)obj, aun);
    }

    // Lazy lists (atd) expose raw elements that may still be byte strings (aqq).
    static int a(int i1, List list)
    {
        int n = list.size();
        if (n == 0)
            return 0;
        int total = arh.e(i1) * n;
        if (list instanceof atd)
        {
            atd lazy = (atd)list;
            for (int j = 0; j < n; j++)
            {
                Object obj = lazy.b(j);
                total += (obj instanceof aqq) ? arh.b((aqq)obj) : arh.b((String)obj);
            }
        } else
        {
            for (int j = 0; j < n; j++)
            {
                Object obj = list.get(j);
                total += (obj instanceof aqq) ? arh.b((aqq)obj) : arh.b((String)obj);
            }
        }
        return total;
    }

    static int a(int i1, List list, aun aun)
    {
        int n = list.size();
        if (n == 0)
            return 0;
        int total = arh.e(i1) * n;
        for (int j = 0; j < n; j++)
        {
            Object obj = list.get(j);
            total += (obj instanceof atb) ? arh.a((atb)obj) : arh.b((atv)obj, aun);
        }
        return total;
    }

    static int a(int i1, List list, boolean flag)
    {
        if (list.size() == 0)
            return 0;
        return a(list) + list.size() * arh.e(i1);
    }

    static int a(List list)
    {
        int n = list.size();
        if (n == 0)
            return 0;
        int total = 0;
        if (list instanceof ati)
        {
            ati longs = (ati)list;
            for (int j = 0; j < n; j++)
                total += arh.d(longs.b(j));
        } else
        {
            for (int j = 0; j < n; j++)
                total += arh.d(((Long)list.get(j)).longValue());
        }
        return total;
    }

    public static avf a()
    {
        return b;
    }

    // Reflectively instantiates avf from the class returned by e(), if present.
    private static avf a(boolean flag)
    {
        try
        {
            Class class1 = e();
            if (class1 == null)
                return null;
            return (avf)class1.getConstructor(new Class[] { Boolean.TYPE })
                    .newInstance(new Object[] { Boolean.valueOf(flag) });
        }
        catch (Throwable throwable)
        {
            return null;
        }
    }

    static Object a(int i1, int j1, Object obj, avf avf1)
    {
        if (obj == null)
            obj = avf1.a();
        avf1.a(obj, i1, j1);
        return obj;
    }

    // Keeps list values accepted by asr1, compacting in place; rejected values
    // are routed into obj via a(int, int, Object, avf) and removed.
    static Object a(int i1, List list, asr asr1, Object obj, avf avf1)
    {
        if (asr1 == null)
            return obj;
        if (list instanceof RandomAccess)
        {
            int n = list.size();
            int kept = 0;
            for (int j = 0; j < n; j++)
            {
                int value = ((Integer)list.get(j)).intValue();
                if (asr1.a(value))
                {
                    if (j != kept)
                        list.set(kept, Integer.valueOf(value));
                    kept++;
                } else
                {
                    obj = a(i1, value, obj, avf1);
                }
            }
            if (kept != n)
                list.subList(kept, n).clear();
        } else
        {
            for (Iterator iterator = list.iterator(); iterator.hasNext();)
            {
                int value = ((Integer)iterator.next()).intValue();
                if (!asr1.a(value))
                {
                    obj = a(i1, value, obj, avf1);
                    iterator.remove();
                }
            }
        }
        return obj;
    }

    public static void a(int i1, List list, awf awf1)
    {
        if (list != null && !list.isEmpty())
            awf1.a(i1, list);
    }

    public static void a(int i1, List list, awf awf1, aun aun)
    {
        if (list != null && !list.isEmpty())
            awf1.a(i1, list, aun);
    }

    public static void a(int i1, List list, awf awf1, boolean flag)
    {
        if (list != null && !list.isEmpty())
            awf1.g(i1, list, flag);
    }

    static void a(aru aru1, Object obj, Object obj1)
    {
        arx arx1 = aru1.a(obj1);
        if (!arx1.b())
            aru1.b(obj).a(arx1);
    }

    static void a(atq atq1, Object obj, Object obj1, long l1)
    {
        avm.a(obj, l1, atq1.a(avm.f(obj, l1), avm.f(obj1, l1)));
    }

    static void a(avf avf1, Object obj, Object obj1)
    {
        avf1.a(obj, avf1.c(avf1.b(obj), avf1.b(obj1)));
    }

    public static void a(Class class1)
    {
        if (!ash.class.isAssignableFrom(class1))
        {
            Class class2 = a;
            if (class2 != null)
            {
                if (class2.isAssignableFrom(class1))
                    return;
                throw new IllegalArgumentException("Message classes must extend GeneratedMessage or GeneratedMessageLite");
            }
        }
    }

    static boolean a(Object obj, Object obj1)
    {
        return obj == obj1 || obj != null && obj.equals(obj1);
    }

    static int b(int i1, List list)
    {
        int n = list.size();
        if (n == 0)
            return 0;
        int total = n * arh.e(i1);
        for (int j = 0; j < n; j++)
            total += arh.b((aqq)list.get(j));
        return total;
    }

    static int b(int i1, List list, aun aun)
    {
        int n = list.size();
        if (n == 0)
            return 0;
        int total = 0;
        for (int j = 0; j < n; j++)
            total += arh.c(i1, (atv)list.get(j), aun);
        return total;
    }

    static int b(int i1, List list, boolean flag)
    {
        int n = list.size();
        if (n == 0)
            return 0;
        return b(list) + n * arh.e(i1);
    }

    static int b(List list)
    {
        int n = list.size();
        if (n == 0)
            return 0;
        int total = 0;
        if (list instanceof ati)
        {
            ati longs = (ati)list;
            for (int j = 0; j < n; j++)
                total += arh.e(longs.b(j));
        } else
        {
            for (int j = 0; j < n; j++)
                total += arh.e(((Long)list.get(j)).longValue());
        }
        return total;
    }

    public static avf b()
    {
        return c;
    }

    public static void b(int i1, List list, awf awf1)
    {
        if (list != null && !list.isEmpty())
            awf1.b(i1, list);
    }

    public static void b(int i1, List list, awf awf1, aun aun)
    {
        if (list != null && !list.isEmpty())
            awf1.b(i1, list, aun);
    }

    public static void b(int i1, List list, awf awf1, boolean flag)
    {
        if (list != null && !list.isEmpty())
            awf1.f(i1, list, flag);
    }

    static int c(int i1, List list, boolean flag)
    {
        int n = list.size();
        if (n == 0)
            return 0;
        return c(list) + n * arh.e(i1);
    }

    static int c(List list)
    {
        int n = list.size();
        if (n == 0)
            return 0;
        int total = 0;
        if (list instanceof ati)
        {
            ati longs = (ati)list;
            for (int j = 0; j < n; j++)
                total += arh.f(longs.b(j));
        } else
        {
            for (int j = 0; j < n; j++)
                total += arh.f(((Long)list.get(j)).longValue());
        }
        return total;
    }

    public static avf c()
    {
        return d;
    }

    public static void c(int i1, List list, awf awf1, boolean flag)
    {
        if (list != null && !list.isEmpty())
            awf1.c(i1, list, flag);
    }

    static int d(int i1, List list, boolean flag)
    {
        int n = list.size();
        if (n == 0)
            return 0;
        return d(list) + n * arh.e(i1);
    }

    static int d(List list)
    {
        int n = list.size();
        if (n == 0)
            return 0;
        int total = 0;
        if (list instanceof asn)
        {
            asn ints = (asn)list;
            for (int j = 0; j < n; j++)
                total += arh.k(ints.b(j));
        } else
        {
            for (int j = 0; j < n; j++)
                total += arh.k(((Integer)list.get(j)).intValue());
        }
        return total;
    }

    private static Class d()
    {
        try
        {
            return Class.forName("com.google.protobuf.GeneratedMessage");
        }
        catch (Throwable throwable)
        {
            return null;
        }
    }

    public static void d(int i1, List list, awf awf1, boolean flag)
    {
        if (list != null && !list.isEmpty())
            awf1.d(i1, list, flag);
    }

    static int e(int i1, List list, boolean flag)
    {
        int n = list.size();
        if (n == 0)
            return 0;
        return e(list) + n * arh.e(i1);
    }

    static int e(List list)
    {
        int n = list.size();
        if (n == 0)
            return 0;
        int total = 0;
        if (list instanceof asn)
        {
            asn ints = (asn)list;
            for (int j = 0; j < n; j++)
                total += arh.f(ints.b(j));
        } else
        {
            for (int j = 0; j < n; j++)
                total += arh.f(((Integer)list.get(j)).intValue());
        }
        return total;
    }

    private static Class e()
    {
        try
        {
            return Class.forName("com.google.protobuf.UnknownFieldSetSchema");
        }
        catch (Throwable throwable)
        {
            return null;
        }
    }

    public static void e(int i1, List list, awf awf1, boolean flag)
    {
        if (list != null && !list.isEmpty())
            awf1.n(i1, list, flag);
    }

    static int f(int i1, List list, boolean flag)
    {
        int n = list.size();
        if (n == 0)
            return 0;
        return f(list) + n * arh.e(i1);
    }

    static int f(List list)
    {
        int n = list.size();
        if (n == 0)
            return 0;
        int total = 0;
        if (list instanceof asn)
        {
            asn ints = (asn)list;
            for (int j = 0; j < n; j++)
                total += arh.g(ints.b(j));
        } else
        {
            for (int j = 0; j < n; j++)
                total += arh.g(((Integer)list.get(j)).intValue());
        }
        return total;
    }

    public static void f(int i1, List list, awf awf1, boolean flag)
    {
        if (list != null && !list.isEmpty())
            awf1.e(i1, list, flag);
    }

    static int g(int i1, List list, boolean flag)
    {
        int n = list.size();
        if (n == 0)
            return 0;
        return g(list) + n * arh.e(i1);
    }

    static int g(List list)
    {
        int n = list.size();
        if (n == 0)
            return 0;
        int total = 0;
        if (list instanceof asn)
        {
            asn ints = (asn)list;
            for (int j = 0; j < n; j++)
                total += arh.h(ints.b(j));
        } else
        {
            for (int j = 0; j < n; j++)
                total += arh.h(((Integer)list.get(j)).intValue());
        }
        return total;
    }

    public static void g(int i1, List list, awf awf1, boolean flag)
    {
        if (list != null && !list.isEmpty())
            awf1.l(i1, list, flag);
    }

    static int h(int i1, List list, boolean flag)
    {
        int n = list.size();
        if (n == 0)
            return 0;
        return n * arh.i(i1, 0);
    }

    static int h(List list)
    {
        return list.size() << 2;
    }

    public static void h(int i1, List list, awf awf1, boolean flag)
    {
        if (list != null && !list.isEmpty())
            awf1.a(i1, list, flag);
    }

    static int i(int i1, List list, boolean flag)
    {
        int n = list.size();
        if (n == 0)
            return 0;
        return n * arh.g(i1, 0L);
    }

    static int i(List list)
    {
        return list.size() << 3;
    }

    public static void i(int i1, List list, awf awf1, boolean flag)
    {
        if (list != null && !list.isEmpty())
            awf1.j(i1, list, flag);
    }

    static int j(int i1, List list, boolean flag)
    {
        int n = list.size();
        if (n == 0)
            return 0;
        return n * arh.b(i1, true);
    }

    static int j(List list)
    {
        return list.size();
    }

    public static void j(int i1, List list, awf awf1, boolean flag)
    {
        if (list != null && !list.isEmpty())
            awf1.m(i1, list, flag);
    }

    public static void k(int i1, List list, awf awf1, boolean flag)
    {
        if (list != null && !list.isEmpty())
            awf1.b(i1, list, flag);
    }

    public static void l(int i1, List list, awf awf1, boolean flag)
    {
        if (list != null && !list.isEmpty())
            awf1.k(i1, list, flag);
    }

    public static void m(int i1, List list, awf awf1, boolean flag)
    {
        if (list != null && !list.isEmpty())
            awf1.h(i1, list, flag);
    }

    public static void n(int i1, List list, awf awf1, boolean flag)
    {
        if (list != null && !list.isEmpty())
            awf1.i(i1, list, flag);
    }

    private static final Class a = d();
    private static final avf b = a(false);
    private static final avf c = a(true);
    private static final avf d = new avh();
}
package alg;

import stdio.StdOut;

/******************************************************************************
 *  Compilation:  javac Autoboxing.java
 *  Execution:    java Autoboxing
 *  Dependencies: StdOut.java
 *
 *  Headaches with autoboxing. Why does the following code behave
 *  the way it does?
 *
 *  % java Autoboxing
 *  42 < 43
 *  42 and 42 are incomparable
 *  43 == 43
 *  142 and 142 are incomparable
 *  true
 *  false
 *  true
 *  false
 *
 *  Explanation:
 *  - cmp(new Integer(42), new Integer(42))
 *    first and second refer to different objects holding the value 42;
 *    thus, first != second.
 *    The expressions (first < second) and (first > second) autounbox
 *    the Integer values to 42, so neither expression is true.
 *
 *  - cmp(43, 43)
 *    the values 43 are autoboxed to the same Integer object because
 *    Java's Integer implementation caches the objects associated with
 *    the values -128 to 127 and the valueOf() method uses the cached
 *    values (and valueOf() gets called by the autoboxing).
 *
 *  - cmp(142, 142)
 *    the values 142 are autoboxed to different Integer objects.
 *
 *  - the Double comparisons at the end: 0.0 == -0.0 is true for
 *    primitives, but Double.equals() compares bit patterns, so boxed
 *    0.0 does not equal boxed -0.0; conversely NaN != NaN for
 *    primitives, yet Double.equals() reports two boxed NaNs as equal.
 *
 ******************************************************************************/

public class Autoboxing {

    public static void cmp(Integer first, Integer second) {
        if      (first < second)  StdOut.printf("%d < %d\n", first, second);
        else if (first == second) StdOut.printf("%d == %d\n", first, second);
        else if (first > second)  StdOut.printf("%d > %d\n", first, second);
        else                      StdOut.printf("%d and %d are incomparable\n", first, second);
    }

    public static void main(String[] args) {
        cmp(new Integer(42), 43);
        cmp(new Integer(42), new Integer(42));
        cmp(43, 43);
        cmp(142, 142);

        double x1 = 0.0, y1 = -0.0;
        Double a1 = x1, b1 = y1;
        StdOut.println(x1 == y1);
        StdOut.println(a1.equals(b1));

        double x2 = 0.0/0.0, y2 = 0.0/0.0;
        Double a2 = x2, b2 = y2;
        StdOut.println(x2 != y2);
        StdOut.println(!a2.equals(b2));
    }
}
/******************** Code to return detected objects to the CameraActivity.java ******************/
public void closeCameraActivity(View view) {
    Intent intent = getIntent();
    Bundle resultsBundle = new Bundle();
    int counter = 1;
    // Pack each detection as "ObjectN -> Class:<label>, Confidence:<percent>".
    for (Classifier.Recognition recognition : resultSet) {
        resultsBundle.putString("Object" + counter++,
                "Class:" + recognition.getTitle() + ", Confidence:" + recognition.getConfidence() * 100);
    }
    intent.putExtra("resultsBundle", resultsBundle);
    // Compare constants first so a missing "FromActivity" extra cannot cause an NPE.
    if ("MainActivity".equals(intent.getStringExtra("FromActivity"))) {
        intent.setClass(DetectorActivity.this, ListActivity.class);
        intent.putExtra("FromActivity", "DetectorActivity");
        startActivity(intent);
    } else if ("ListActivity".equals(intent.getStringExtra("FromActivity"))) {
        setResult(RESULT_OK, intent);
    }
    finish();
}
/// Calculates the number of LP tokens that correspond to an amount of token B. /// This does not take withdraw fees into account. pub fn lp_tokens_for_b_excluding_fees(&self, token_b_amount: u64) -> Option<u64> { (token_b_amount as u128) .checked_mul(self.supply as u128)? .checked_div(self.token_b as u128)? .to_u64() }
// NewFromParams creates a new Team from params func (c TeamsClient) NewFromParams(params TeamParams) (Team, ClientError) { t := Team{} err := c.client.NewRequest("POST", teamsEndpoint, params, &t, nil) return t, err }
/**
 * A string grid built from a text picture; it locates a start cell ("S") and
 * a goal cell ("G"), treats "X" cells as blocked, and can solve and render a
 * path between them.
 * @author masao
 */
public class StringGrid {
    final public GridOverList<String> body;

    public StringGrid(String picture, Integer... dim) {
        this(picture, TList.sof(dim));
    }

    public StringGrid(String picture, TList<Integer> dim) {
        GridCoord axis = new GridCoord(dim.map(i -> new GridAxis(0, i)));
        this.body = new GridOverList<>(axis, TList.set(toCharacters(picture)).map(c -> c.toString()));
    }

    /** Path from the "S" cell to the "G" cell, avoiding blocked cells. */
    public TList<List<Integer>> path() {
        return new BareBoneGridSolver(body.axis).solve(find("S"), find("G"), blocked());
    }

    /** Renders the found path by overwriting its cells with "#". */
    public GridOverList<String> solve() {
        return body.fix().multiCSet(path().seek(1).seek(-1), p -> "#");
    }

    public Collection<List<Integer>> blocked() {
        return body.axis.i2a(body.filterAt(s -> s.equals("X")));
    }

    public List<Integer> find(String x) {
        TList<List<Integer>> addresses = body.addressOf(s -> s.equals(x));
        if (addresses.isEmpty())
            throw new RuntimeException("no " + x + " point is designated.");
        return addresses.get(0);
    }

    @Override
    public String toString() {
        return body.toString();
    }
}
// New creates an instance of *Limiter. Configure the Limiter with the options specified. // Example: limiter.New(4, WithTimeout(5)) func New(limit int, options ...Option) *Limiter { l := &Limiter{ Limit: limit, } for _, o := range options { o(l) } return l }
A Model to Implement and Secure Online Documentation using Blockchain
The past decade has seen a tremendous rise in the use of blockchains for facilitating a trust-based decentralized financial framework, in the hope of eliminating dependency on core institutions such as banks. A blockchain stores a chronological record for a document, detailing all the alterations that have been made to it since its inception. The data contained within this document may be anything, e.g. a lease document, personal files, etc. Smart documents can be used everywhere within an industry or an enterprise, although blockchain-based structures are much less eco-friendly than database-based approaches. The InterPlanetary File System (IPFS) can be used to remotely store the massive quantities of statistical data that may be involved in a blockchain transaction and to address it in the actual chain by an absolute, everlasting link. Each report and all the blocks in a chain are assigned a unique fingerprint known as a cryptographic hash. This virtual cryptographic signature is used to verify the legitimacy of each block. In this paper, with the help of IPFS along with other encryption methods, we propose a secure and tamper-proof model for blockchain-based financial transactions.
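A minimal sketch of the fingerprinting and chaining idea described above, assuming SHA-256 as the hash function and a simple in-memory chain; the field names, the `prev_hash` linkage, and the example CIDs are illustrative, not the paper's concrete design:

```python
import hashlib
import json
import time

def fingerprint(block: dict) -> str:
    """Return the SHA-256 hex digest of a block's canonical JSON form."""
    canonical = json.dumps(block, sort_keys=True).encode("utf-8")
    return hashlib.sha256(canonical).hexdigest()

def append_block(chain: list, document_ref: str) -> dict:
    """Append a block that records one document alteration.

    In the paper's setting, document_ref would be an IPFS content address,
    so only the permanent link, not the document itself, is stored on-chain.
    """
    prev_hash = chain[-1]["hash"] if chain else "0" * 64
    block = {
        "index": len(chain),
        "timestamp": time.time(),
        "document_ref": document_ref,
        "prev_hash": prev_hash,
    }
    block["hash"] = fingerprint({k: v for k, v in block.items() if k != "hash"})
    chain.append(block)
    return block

chain = []
append_block(chain, "QmExampleCid1")  # hypothetical IPFS CID
append_block(chain, "QmExampleCid2")
# Tampering with any block changes its digest and breaks the prev_hash links.
```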
/**
 * Converts a Java long to a big-endian byte array of 8 bytes
 * (DataOutputStream always writes multi-byte values in big-endian order).
 *
 * @param c is the value to convert.
 * @return the resulting 8-byte array
 */
public static byte[] toByteArray(long c) {
    try {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        DataOutputStream dos = new DataOutputStream(bos);
        dos.writeLong(c);
        dos.flush();
        byte[] data = bos.toByteArray();
        return data;
    } catch (IOException e) {
        throw new IOError(e);
    }
}
def provide_color_scheme_builder(self): if self._color_scheme_builder is not None: return self._color_scheme_builder _, color_scheme_data, _ = self.provide_fake_color_scheme_data() self._color_scheme_builder = ColorSchemeBuilder( color_scheme_data, self.provide_fake_color_scheme_writer(), self._settings.experimental.asynchronosly_update_color_scheme) return self._color_scheme_builder
I once talked to a World War II veteran about the experience of attending college after coming home, and asked if it was jarring to sit next to those who never served. I wondered if veterans huddled together under the umbrella of mutual understanding and thought less of civilians who never shouldered a rifle. His answer was surprising. They were proud of their time in uniform, he said, but for many, the war interrupted their lives, and education was a return to normalcy. Instead of a victory lap, they were more interested in getting back on track. Perhaps the fact that many Iraq and Afghanistan veterans I’ve talked to take precisely the opposite view is due, in part, to current civilian attitudes. I call it the pedestal problem. For many civilians, veterans are thought about in the span of football halftime shows, where we gawk at troops standing on the sidelines while the camera lingers on flags flapping in the wind. The word hero is tossed around and abused to the point of banality. The good intentions of civilians are rarely in question, but detached admiration has always been a stand-in for the impulse to do “something” for veterans. So civilians clap at football games. They applaud returning troops in airports in outward appreciation, satisfied with their magnanimous deeds. Then, for many of them, it’s back to more tangible concerns, like the fragile economy. A veteran’s résumé might come across your desk, but if you’re like more than half of these surveyed hiring managers, you harbor suspicion and fear about post-traumatic stress episodes in the workplace. That’s the problem with viewing something on a pedestal: you can only see one side at a time, and rarely at depth. It produces extremes: the valiant hero or the downtrodden, unstable veteran. Thank you for your service. But we’re looking for someone else. The view from the pedestal has warped the perspective many veterans hold when they leave the service. We call ourselves warriors and worship the Spartan ethos, but don’t always appreciate that our society is detached from our conflicts in a way Sparta never was. The superiority complex on the part of volunteer troops and veterans was described as far back as 1997 and has been compounded by two conflicts and countless trying moments that have fed our pride. One could walk the earth for decades before finding a sense of worth and belonging that equaled what some of us experienced while in service. From the first time we walk into a recruiter’s office to our last out-processing brief, we’re told recognition is exactly what we can expect. We’re ahead of the curve. We can lead and train. We are, we tell ourselves, more prepared than our civilian cohorts. Unfortunately, many of us have found this isn’t the case, but that chip on our shoulder doesn’t tend to fall off. It leads to frustrating feelings that civilians don’t value our experiences in the workplace or the classroom.
# Minimum number of characters of S that must be changed so that T appears
# as a contiguous substring: slide T over S and keep the alignment with the
# most matching characters.
S = input()
T = input()
S_list = list(S)
T_list = list(T)

t_cnt_max = 0
for i in range(len(S_list) - len(T_list) + 1):
    t_cnt = 0
    for j in range(len(T_list)):
        if S_list[i + j] == T_list[j]:
            t_cnt += 1
    if t_cnt_max < t_cnt:
        t_cnt_max = t_cnt

print(len(T_list) - t_cnt_max)
Modeling Personality Influence over Preference Model’s Parameter Values . Abstract. Every enterprise faces the necessity of taking decisions in their everyday activities. The decision process is leaded by one or several Decision Makers (DMs) who are in charge of deciding the proper combination of resources to achieve the goals of the company. Among others, approaches based on outranking relations have been developed to create computable models that aid in the selection of the best alternatives in the decision process. Such models require the elicitation of a set of parameters, a task that so far still has areas of opportunity for research. This work presents an architecture that integrates the personality as a factor that influence the parameter values of preference models based on outranking relations. It is pointed out that with the development of this architecture, it will be possible to integrate personality models in direct parameter elicitation strategies for preference models. Introduction Nowadays enterprises face the necessity of taking many decisions related to their everyday activities. Usually, these decisions are made over real world problems whose solutions contribute to the achievement of desire results. The most common strategy followed to provide assistance in such situations is through the development of optimizations models that reflects the needs of an enterprise but MOL2NET, 2018, 4, http://sciforum.net/conference/mol2net-04 2 also that incorporates particular preferences of the Decision Maker (DM) who is meant to select the best solution, e.g. see (Roy, 1996). According to (Coello, 2000), the strategies that have been used so far to model preferences are based on goal attainment, utility functions, preference relations, outranking and fuzzy logic. Of particular interest are the outranking approaches which exploits outranking relations to give answer to Multi-objective Optimization Problems (MOPs), see (Roy, 1996). Such approaches have allowed the development of computable preference models, based on a predefined set of parameters that reflect the interests of a DM. The most practical way that can be used to set the parameter values for that preference model is through Preference Disaggregation Methods (Rangel-Valdez et al., 2015;Cruz-Reyes et al., 2017), which are methods that based on a battery of examples provided by the DM elicits the entire set of parameters. Through recent work, it has been observed that the preferences of a DM could be strongly influenced by abstract aspects of his/her personality (cf. Morales-Rodriguez, 2007), e.g. his/her level of tolerance. In this direction, the personality and the emotional state are relevant elements that provide in some way an added value to these preferences, and they could produce more descriptive and approximate solutions to the reasoning of the individual (Paranagama et al., 1997). Hence, it is acceptable to think that the personality should influence the values of the parameters that define a specific preference model that is used to characterize a DM, e.g. the ones used in (Fernandez et al., 2013). The main objective in the proposed research is to develop an architecture that allow future study of the impact of the influence of personality on preferences within a decisional context. The work presents the architecture, which takes as input aspects of a personality and consider them as possible modifiers of the parameter values for preference models. 
The considered parameters of preference models were those of ELECTRE III (cf. Roy, 1991), a well-known model that takes into account the preferences of a DM to compare different alternatives characterized by several criteria. The proposed architecture considers the use of the most recurrent models of the theory of personality in order to provide a computable measure of the tolerance of an individual. As a result, the research set some insight on how to adjust tolerance and intensity, two features from the personality of a DM, from existing personality models, and how to use the values to modify the credibility threshold used by ELECTRE III. . Materials and Methods The present research proposes an architecture based on the rational agent proposed by (Russell, Norvig & Davis, 2010). The architecture has the basic principles of any intelligent system, it has reactivity, internal state and goals, also it can estimate the degree of satisfaction of a state in the agent that could be used to select among distinct valid actions; also, the architecture incorporates autonomy, sociability and reasoning, required to involve agents that are not only reactive such as Embodied Conversational Agents (cf. Picard, 1997;Buisine, 2004). The architecture proposed should be of deliberative type because the agent involved must have: 1) A social and intentional behavior; 2) A high cognitive level; and, 3) The capacity of symbolically representing the real world, as indicated in (Iglesias-Fernández, 1998;Wooldridge, 2002). The type of reasoning of such architecture can be related to the outranking relational preference approaches (through Preference Disaggregation Methods) with the personality models based on personality traits (e.g. OCEAN) and personality types (e.g. MBTI and KTS). Basically, the components required for the MOL2NET, 2018, 4, http://sciforum.net/conference/mol2net-04 3 architecture in order to achieve the expected behavior are based in four modules: perception, deliberative process, influence personality model, and interface. Figure 1 presents the interrelation of all these elements; note the use of a knowledge database whose purpose is serve as memory of the agent and maintain the key information of the DM that is been model, for example, the reference set that can be used to indirectly approximate its preferences. Details on each module are provided in the remaining of this section. Figure 1. Architecture to integrate influence of personality into preferences of a DM. Perception Module. This module is in charge of receiving the input information necessary from the DM in order to support the decision process. The first time that the agent derived from the architecture is in contact with the customer, the knowledge database should be filled with personality information retrieved from the DM such as the reference set, i.e. the set of examples that provide guide in the preferences of an individual. Personality Module. This module has the personality model, and it provides the behavior of a DM. This model is the one that contains the methodology that should be applied to change of parameter values of a preference model due to particular aspects of personality such as tolerance or intensity. Where, the personalities models applied are FFM-OCEAN, MBTI and KTS. Deliberative Module. This module conceals a selection process in which, with the aid of metaheuristics such as evolutionary algorithms that can incorporate preferences models (cf. 
Fernandez et al., 2013), the agent approximates the region of interest of the decision maker and tries to produce a set of alternatives for the decision process in question (defined in the Perception Module). This module can be seen as the optimization process in charge of delivering a solution for a specific problem (cf. Rivera-Zarate, 2011).

Interface Module. This is the module that allows interaction between the agent and the real DM. Note that the architecture defines an agent that simulates the influence of the personality of a real DM over his/her preferences; hence there must exist a way in which the real DM transfers such knowledge, and that is through the interface module.

Results and Discussion

The development of the previous architecture opens lines of research into models that compute the values of different characteristics of the personality. As an example, intensity and tolerance are two personality aspects that can be measured through the Five Factor Model (OCEAN) (McCrae & John, 1992), the MBTI (The-Myers-Briggs-Foundation, 2017) and the KTS (Keirsey, 1998). To provide an example, Equation 1 shows the computation of the intensity from the values obtained by the IPIP-NEO questionnaire; there, the value of intensity is the total sum of the levels of the five factors of the OCEAN model.

In = Σ_{k=1}^{5} Lk    (1)

In Equation 1, In is the intensity of a personality profile, k refers to each of the five factors, and Lk is the level of OCEAN factor k. The value of In represents the degree of intensity as a percentage. Table 1 shows an example of a possible result: each column contains the value of one OCEAN factor for a strict personality profile, and the last column the value of In; a value of 60% might indicate a relaxed personality.

Table 1. Example of the use of the OCEAN model to compute the intensity of the personality.

The value of intensity can be a main component in the definition of other features of the personality, for example the tolerance. Tolerance can be understood as the flexibility of an individual to choose an alternative that is distinct from his/her preference. It is suggested that the intensity In can be integrated into a mathematical model in order to estimate a proper value of the tolerance; this estimation could be further supported by other personality models, such as the KTS. Table 2 shows an example of how the result could look; there, eight decision profiles are defined, each of them related to one of the temperaments defined by the KTS (see the second column). By distributing the proportions uniformly, each temperament is related to a tolerance value (third column), where the extreme values 1 and 0 represent complete tolerance and absence of tolerance, respectively. With the tolerance values given in Table 2, one proposal to associate them with the credibility threshold λ of ELECTRE III could be an indicator such as λ = (1 + (1 − t))/2, where t is the tolerance value; this indicator represents the influence of tolerance in inverse proportion to the credibility index. The latter makes sense if one observes that the higher the value of λ, the stronger the supporting information for a comparison in ELECTRE III must be; hence, a tolerant person might be related to low values of λ, and a non-tolerant one to high values.
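To make the computations above concrete, the following minimal Python sketch reproduces Equation 1 and the proposed tolerance-to-λ mapping. The factor levels, the tolerance value, and the exact form of the λ formula are illustrative assumptions rather than published results:

# Illustrative sketch of the computations described above. The factor
# levels, the tolerance value and the form of the lambda formula are
# assumptions for demonstration, not part of the published model.

def intensity(levels):
    # Equation 1: In is the total sum of the five OCEAN factor levels,
    # expressed as a percentage.
    assert len(levels) == 5
    return sum(levels)

def credibility_threshold(tolerance):
    # Assumed mapping: lambda = (1 + (1 - t)) / 2, so a fully tolerant DM
    # (t = 1) yields the lowest threshold (0.5) and a fully intolerant DM
    # (t = 0) yields the highest (1.0), matching the inverse relation above.
    return (1 + (1 - tolerance)) / 2

levels = [12, 12, 12, 12, 12]       # hypothetical factor levels summing to 60
print(intensity(levels))            # 60 -> a relaxed profile, as in Table 1
print(credibility_threshold(0.75))  # 0.625 -> tolerant DM, low lambda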
The model proposed above is an example of the approaches that can be developed to relate personality with the preferences of a DM in computable models.

Conclusions

The present work briefly reports research on the development of an architecture that integrates personality as a factor of influence in preference models. The application of existing personality models to define computable values for personality features such as tolerance and intensity can be further extended to modify the parameter values of a preference model and, in a way, serve as an alternative direct elicitation strategy that could compete with others such as preference disaggregation analysis. The present research will continue this line of work by integrating personality influence into preferences and studying whether or not it makes a significant difference.
<filename>Source/Urho3D/Resource/XMLElement.h // // Copyright (c) 2008-2020 the Urho3D project. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. // #pragma once #include "../Container/Ptr.h" #include "../Core/Variant.h" #include "../Math/BoundingBox.h" #include "../Math/Rect.h" namespace pugi { struct xml_node_struct; class xpath_node; class xpath_node_set; class xpath_query; class xpath_variable_set; } namespace Urho3D { class XMLFile; class XPathQuery; class XPathResultSet; /// Element in an XML file. class URHO3D_API XMLElement { public: /// Construct null element. XMLElement(); /// Construct with document and node pointers. XMLElement(XMLFile* file, pugi::xml_node_struct* node); /// Construct from xpath query result set. XMLElement(XMLFile* file, const XPathResultSet* resultSet, const pugi::xpath_node* xpathNode, unsigned xpathResultIndex); /// Copy-construct from another element. XMLElement(const XMLElement& rhs); /// Destruct. ~XMLElement(); /// Assignment operator. XMLElement& operator =(const XMLElement& rhs); /// Create a child element. XMLElement CreateChild(const String& name); /// Create a child element. XMLElement CreateChild(const char* name); /// Return the first child element with name or create if does not exist. XMLElement GetOrCreateChild(const String& name); /// Return the first child element with name or create if does not exist. XMLElement GetOrCreateChild(const char* name); /// Append element. If asCopy is set to true then original element is copied and appended, otherwise specified element is appended. bool AppendChild(XMLElement element, bool asCopy = false); /// Remove element from its parent. bool Remove(); /// Remove a child element. Return true if successful. bool RemoveChild(const XMLElement& element); /// Remove a child element by name. Return true if successful. bool RemoveChild(const String& name); /// Remove a child element by name. Return true if successful. bool RemoveChild(const char* name); /// Remove child elements of certain name, or all child elements if name is empty. Return true if successful. bool RemoveChildren(const String& name = String::EMPTY); /// Remove child elements of certain name, or all child elements if name is empty. Return true if successful. bool RemoveChildren(const char* name); /// Remove an attribute by name. Return true if successful. bool RemoveAttribute(const String& name = String::EMPTY); /// Remove an attribute by name. Return true if successful. 
bool RemoveAttribute(const char* name); /// Select an element/attribute using XPath query. XMLElement SelectSingle(const String& query, pugi::xpath_variable_set* variables = nullptr) const; /// Select an element/attribute using XPath query. XMLElement SelectSinglePrepared(const XPathQuery& query) const; /// Select elements/attributes using XPath query. XPathResultSet Select(const String& query, pugi::xpath_variable_set* variables = nullptr) const; /// Select elements/attributes using XPath query. XPathResultSet SelectPrepared(const XPathQuery& query) const; /// Set the value for an inner node in the following format <node>value</node>. /// @property bool SetValue(const String& value); /// Set the value for an inner node in the following format <node>value</node>. Must be used on the <node> element. bool SetValue(const char* value); /// Set an attribute. bool SetAttribute(const String& name, const String& value); /// Set an attribute. bool SetAttribute(const char* name, const char* value); /// Set an attribute. Only valid if it is an attribute only XPath query result. bool SetAttribute(const String& value); /// Set an attribute. Only valid if it is an attribute only XPath query result. bool SetAttribute(const char* value); /// Set a bool attribute. bool SetBool(const String& name, bool value); /// Set a BoundingBox attribute. bool SetBoundingBox(const BoundingBox& value); /// Set a buffer attribute. bool SetBuffer(const String& name, const void* data, unsigned size); /// Set a buffer attribute. bool SetBuffer(const String& name, const PODVector<unsigned char>& value); /// Set a color attribute. bool SetColor(const String& name, const Color& value); /// Set a float attribute. bool SetFloat(const String& name, float value); /// Set a double attribute. bool SetDouble(const String& name, double value); /// Set an unsigned integer attribute. bool SetUInt(const String& name, unsigned value); /// Set an integer attribute. bool SetInt(const String& name, int value); /// Set an unsigned long long integer attribute. bool SetUInt64(const String& name, unsigned long long value); /// Set a long long integer attribute. bool SetInt64(const String& name, long long value); /// Set an IntRect attribute. bool SetIntRect(const String& name, const IntRect& value); /// Set an IntVector2 attribute. bool SetIntVector2(const String& name, const IntVector2& value); /// Set an IntVector3 attribute. bool SetIntVector3(const String& name, const IntVector3& value); /// Set a Rect attribute. bool SetRect(const String& name, const Rect& value); /// Set a quaternion attribute. bool SetQuaternion(const String& name, const Quaternion& value); /// Set a string attribute. bool SetString(const String& name, const String& value); /// Set a variant attribute. bool SetVariant(const Variant& value); /// Set a variant attribute excluding the type. bool SetVariantValue(const Variant& value); /// Set a resource reference attribute. bool SetResourceRef(const ResourceRef& value); /// Set a resource reference list attribute. bool SetResourceRefList(const ResourceRefList& value); /// Set a variant vector attribute. Creates child elements as necessary. bool SetVariantVector(const VariantVector& value); /// Set a string vector attribute. Creates child elements as necessary. bool SetStringVector(const StringVector& value); /// Set a variant map attribute. Creates child elements as necessary. bool SetVariantMap(const VariantMap& value); /// Set a Vector2 attribute. 
bool SetVector2(const String& name, const Vector2& value); /// Set a Vector3 attribute. bool SetVector3(const String& name, const Vector3& value); /// Set a Vector4 attribute. bool SetVector4(const String& name, const Vector4& value); /// Set a float, Vector or Matrix attribute stored in a variant. bool SetVectorVariant(const String& name, const Variant& value); /// Set a Matrix3 attribute. bool SetMatrix3(const String& name, const Matrix3& value); /// Set a Matrix3x4 attribute. bool SetMatrix3x4(const String& name, const Matrix3x4& value); /// Set a Matrix4 attribute. bool SetMatrix4(const String& name, const Matrix4& value); /// Return whether does not refer to an element or an XPath node. /// @property{get_isNull} bool IsNull() const; /// Return whether refers to an element or an XPath node. /// @property bool NotNull() const; /// Return true if refers to an element or an XPath node. explicit operator bool() const; /// Return element name (or attribute name if it is an attribute only XPath query result). /// @property String GetName() const; /// Return whether has a child element. bool HasChild(const String& name) const; /// Return whether has a child element. bool HasChild(const char* name) const; /// Return child element, or null if missing. XMLElement GetChild(const String& name = String::EMPTY) const; /// Return child element, or null if missing. XMLElement GetChild(const char* name) const; /// Return next sibling element. XMLElement GetNext(const String& name = String::EMPTY) const; /// Return next sibling element. XMLElement GetNext(const char* name) const; /// Return parent element. /// @property XMLElement GetParent() const; /// Return number of attributes. /// @property unsigned GetNumAttributes() const; /// Return whether has an attribute. bool HasAttribute(const String& name) const; /// Return whether has an attribute. bool HasAttribute(const char* name) const; /// Return inner value, or empty if missing for nodes like <node>value</node>. /// @property String GetValue() const; /// Return attribute, or empty if missing. String GetAttribute(const String& name = String::EMPTY) const; /// Return attribute, or empty if missing. String GetAttribute(const char* name) const; /// Return attribute as C string, or null if missing. const char* GetAttributeCString(const char* name) const; /// Return attribute in lowercase, or empty if missing. String GetAttributeLower(const String& name) const; /// Return attribute in lowercase, or empty if missing. String GetAttributeLower(const char* name) const; /// Return attribute in lowercase, or empty if missing. String GetAttributeUpper(const String& name) const; /// Return attribute in lowercase, or empty if missing. String GetAttributeUpper(const char* name) const; /// Return names of all attributes. Vector<String> GetAttributeNames() const; /// Return bool attribute, or false if missing. bool GetBool(const String& name) const; /// Return buffer attribute, or empty if missing. PODVector<unsigned char> GetBuffer(const String& name) const; /// Copy buffer attribute into a supplied buffer. Return true if buffer was large enough. bool GetBuffer(const String& name, void* dest, unsigned size) const; /// Return bounding box attribute, or empty if missing. BoundingBox GetBoundingBox() const; /// Return a color attribute, or default if missing. Color GetColor(const String& name) const; /// Return a float attribute, or zero if missing. float GetFloat(const String& name) const; /// Return a double attribute, or zero if missing. 
double GetDouble(const String& name) const; /// Return an unsigned integer attribute, or zero if missing. unsigned GetUInt(const String& name) const; /// Return an integer attribute, or zero if missing. int GetInt(const String& name) const; /// Return an unsigned long long integer attribute, or zero if missing. unsigned long long GetUInt64(const String& name) const; /// Return a long long integer attribute, or zero if missing. long long GetInt64(const String& name) const; /// Return an IntRect attribute, or default if missing. IntRect GetIntRect(const String& name) const; /// Return an IntVector2 attribute, or default if missing. IntVector2 GetIntVector2(const String& name) const; /// Return an IntVector3 attribute, or default if missing. IntVector3 GetIntVector3(const String& name) const; /// Return a Rect attribute, or default if missing. Rect GetRect(const String& name) const; /// Return a quaternion attribute, or default if missing. Quaternion GetQuaternion(const String& name) const; /// Return a variant attribute, or empty if missing. Variant GetVariant() const; /// Return a variant attribute with static type. Variant GetVariantValue(VariantType type) const; /// Return a resource reference attribute, or empty if missing. ResourceRef GetResourceRef() const; /// Return a resource reference list attribute, or empty if missing. ResourceRefList GetResourceRefList() const; /// Return a variant vector attribute, or empty if missing. VariantVector GetVariantVector() const; /// Return a string vector attribute, or empty if missing. StringVector GetStringVector() const; /// Return a variant map attribute, or empty if missing. VariantMap GetVariantMap() const; /// Return a Vector2 attribute, or zero vector if missing. Vector2 GetVector2(const String& name) const; /// Return a Vector3 attribute, or zero vector if missing. Vector3 GetVector3(const String& name) const; /// Return a Vector4 attribute, or zero vector if missing. Vector4 GetVector4(const String& name) const; /// Return any Vector attribute as Vector4. Missing coordinates will be zero. Vector4 GetVector(const String& name) const; /// Return a float, Vector or Matrix attribute as Variant. Variant GetVectorVariant(const String& name) const; /// Return a Matrix3 attribute, or zero matrix if missing. Matrix3 GetMatrix3(const String& name) const; /// Return a Matrix3x4 attribute, or zero matrix if missing. Matrix3x4 GetMatrix3x4(const String& name) const; /// Return a Matrix4 attribute, or zero matrix if missing. Matrix4 GetMatrix4(const String& name) const; /// Return XML file. /// @property XMLFile* GetFile() const; /// Return pugixml xml_node_struct. pugi::xml_node_struct* GetNode() const { return node_; } /// Return XPath query result set. const XPathResultSet* GetXPathResultSet() const { return xpathResultSet_; } /// Return pugixml xpath_node. const pugi::xpath_node* GetXPathNode() const { return xpathNode_; } /// Return current result index. unsigned GetXPathResultIndex() const { return xpathResultIndex_; } /// Return next XPath query result. Only valid when this instance of XMLElement is itself one of the query result in the result set. /// @property XMLElement NextResult() const; /// Empty XMLElement. static const XMLElement EMPTY; private: /// XML file. WeakPtr<XMLFile> file_; /// Pugixml node. pugi::xml_node_struct* node_; /// XPath query result set. const XPathResultSet* xpathResultSet_; /// Pugixml xpath_node. 
const pugi::xpath_node* xpathNode_; /// Current XPath query result index (used internally to advance to subsequent query result). mutable unsigned xpathResultIndex_; }; /// XPath query result set. class URHO3D_API XPathResultSet { public: /// Construct empty result set. XPathResultSet(); /// Construct with result set from XPath query. XPathResultSet(XMLFile* file, pugi::xpath_node_set* resultSet); /// Copy-construct. XPathResultSet(const XPathResultSet& rhs); /// Destruct. ~XPathResultSet(); /// Assignment operator. XPathResultSet& operator =(const XPathResultSet& rhs); /// Return the n-th result in the set. Call XMLElement::GetNextResult() to get the subsequent result in the set. /// Note: The XPathResultSet return value must be stored in a lhs variable to ensure the underlying xpath_node_set* is still valid while performing XPathResultSet::FirstResult(), XPathResultSet::operator [], and XMLElement::NextResult(). XMLElement operator [](unsigned index) const; /// Return the first result in the set. Call XMLElement::GetNextResult() to get the subsequent result in the set. /// Note: The XPathResultSet return value must be stored in a lhs variable to ensure the underlying xpath_node_set* is still valid while performing XPathResultSet::FirstResult(), XPathResultSet::operator [], and XMLElement::NextResult(). /// @property XMLElement FirstResult(); /// Return size of result set. /// @property unsigned Size() const; /// Return whether result set is empty. /// @property bool Empty() const; /// Return pugixml xpath_node_set. pugi::xpath_node_set* GetXPathNodeSet() const { return resultSet_; } private: /// XML file. WeakPtr<XMLFile> file_; /// Pugixml xpath_node_set. pugi::xpath_node_set* resultSet_; }; /// XPath query. class URHO3D_API XPathQuery { public: /// Construct empty. XPathQuery(); /// Construct XPath query object with query string and variable string. The variable string format is "name1:type1,name2:type2,..." where type is one of "Bool", "Float", "String", "ResultSet". explicit XPathQuery(const String& queryString, const String& variableString = String::EMPTY); /// Destruct. ~XPathQuery(); /// Bind query object with variable set. void Bind(); /// Add/Set a bool variable. Return true if successful. bool SetVariable(const String& name, bool value); /// Add/Set a float variable. Return true if successful. bool SetVariable(const String& name, float value); /// Add/Set a string variable. Return true if successful. bool SetVariable(const String& name, const String& value); /// Add/Set a string variable. Return true if successful. bool SetVariable(const char* name, const char* value); /// Add/Set an XPath query result set variable. Return true if successful. bool SetVariable(const String& name, const XPathResultSet& value); /// Set XPath query string and variable string. The variable string format is "name1:type1,name2:type2,..." where type is one of "Bool", "Float", "String", "ResultSet". bool SetQuery(const String& queryString, const String& variableString = String::EMPTY, bool bind = true); /// Clear by removing all variables and XPath query object. void Clear(); /// Evaluate XPath query and expecting a boolean return value. bool EvaluateToBool(const XMLElement& element) const; /// Evaluate XPath query and expecting a float return value. float EvaluateToFloat(const XMLElement& element) const; /// Evaluate XPath query and expecting a string return value. String EvaluateToString(const XMLElement& element) const; /// Evaluate XPath query and expecting an XPath query result set as return value. 
/// Note: The XPathResultSet return value must be stored in a lhs variable to ensure the underlying xpath_node_set* is still valid while performing XPathResultSet::FirstResult(), XPathResultSet::operator [], and XMLElement::NextResult(). XPathResultSet Evaluate(const XMLElement& element) const; /// Return query string. /// @property String GetQuery() const { return queryString_; } /// Return pugixml xpath_query. pugi::xpath_query* GetXPathQuery() const { return query_.Get(); } /// Return pugixml xpath_variable_set. pugi::xpath_variable_set* GetXPathVariableSet() const { return variables_.Get(); } private: /// XPath query string. String queryString_; /// Pugixml xpath_query. UniquePtr<pugi::xpath_query> query_; /// Pugixml xpath_variable_set. UniquePtr<pugi::xpath_variable_set> variables_; }; }
def parse_file(file_name, dataset_path):
    file_name = dataset_path + file_name
    with open(file_name) as f:
        data = f.read()
    data = data.replace("\n", "")
    data = data.replace("\'", '')
    datas = data.split(";")
    headers = []
    start_data = 0
    for i, line in enumerate(datas):
        if line != 'DATA':
            headers.append(line)
        else:
            start_data = i + 1
            break
    for i in range(0, start_data):
        datas.pop(0)
    return headers, datas


def parse_header(file_name):
    file_name = "C:/Users/Computer/PycharmProjects/graphStepSimilarity/Datasets/" + file_name
    with open(file_name) as f:
        data = f.read()
    datas = data.split("\n")
    headers = []
    for i, line in enumerate(datas):
        if line != 'DATA;':
            headers.append(line)
        else:
            break
    return headers
/**
 * <pre>
 * Hide this op by putting it into an internal namespace (or whatever
 * is appropriate in the target language).
 * </pre>
 *
 * <code>bool hide = 3;</code>
 */
public Builder clearHide() {
  hide_ = false;
  onChanged();
  return this;
}
/*************** CVDenseSetup ****************************************

 This routine does the setup operations for the dense linear solver.
 It makes a decision whether or not to call the Jacobian evaluation
 routine based on various state variables, and if not it uses the
 saved copy. In any case, it constructs the Newton matrix
 M = I - gamma*J, updates counters, and calls the dense LU
 factorization routine.

**********************************************************************/

static int CVDenseSetup(CVodeMem cv_mem, int convfail, N_Vector ypred,
                        N_Vector fpred, booleantype *jcurPtr,
                        N_Vector vtemp1, N_Vector vtemp2, N_Vector vtemp3)
{
  booleantype jbad, jok;
  realtype dgamma;
  integertype ier;
  CVDenseMem cvdense_mem;

  cvdense_mem = (CVDenseMem) lmem;

  /* Decide whether the saved Jacobian is stale. */
  dgamma = ABS((gamma / gammap) - ONE);
  jbad = (nst == 0) || (nst > nstlj + CVD_MSBJ) ||
         ((convfail == FAIL_BAD_J) && (dgamma < CVD_DGMAX)) ||
         (convfail == FAIL_OTHER);
  jok = !jbad;

  if (jok) {
    /* Reuse the saved copy of the Jacobian. */
    *jcurPtr = FALSE;
    DenseCopy(savedJ, M);
  } else {
    /* Re-evaluate the Jacobian and save a fresh copy. */
    nje++;
    if (iopt != NULL)
      iopt[DENSE_NJE] = nje;
    nstlj = nst;
    *jcurPtr = TRUE;
    DenseZero(M);
    jac(N, M, f, f_data, tn, ypred, fpred, ewt, h, uround, J_data, &nfe,
        vtemp1, vtemp2, vtemp3);
    DenseCopy(M, savedJ);
  }

  /* Build M = I - gamma*J and perform the LU factorization. */
  DenseScale(-gamma, M);
  DenseAddI(M);
  ier = DenseFactor(M, pivots);

  if (ier > 0)
    return (1);
  return (0);
}
/** * Two of the testcases in the yangtools/yang-data-impl are leveraged (with modification) to create * the serialization of NormalizedNode using the ProtocolBuffer * * @syedbahm * */ public class NormalizedNodeXmlConverterTest { private static final Logger logger = LoggerFactory .getLogger(NormalizedNodeXmlConverterTest.class); public static final String NAMESPACE = "urn:opendaylight:params:xml:ns:yang:controller:test"; private static Date revision; private ContainerNode expectedNode; private ContainerSchemaNode containerNode; private String xmlPath; static { try { revision = new SimpleDateFormat("yyyy-MM-dd").parse("2014-03-13"); } catch (ParseException e) { throw new RuntimeException(e); } } public static DataSchemaNode getSchemaNode(final SchemaContext context, final String moduleName, final String childNodeName) { for (Module module : context.getModules()) { if (module.getName().equals(moduleName)) { DataSchemaNode found = findChildNode(module.getChildNodes(), childNodeName); Preconditions.checkState(found != null, "Unable to find %s", childNodeName); return found; } } throw new IllegalStateException("Unable to find child node " + childNodeName); } static DataSchemaNode findChildNode( final Collection<DataSchemaNode> children, final String name) { List<DataNodeContainer> containers = Lists.newArrayList(); for (DataSchemaNode dataSchemaNode : children) { if (dataSchemaNode.getQName().getLocalName().equals(name)) { return dataSchemaNode; } if (dataSchemaNode instanceof DataNodeContainer) { containers.add((DataNodeContainer) dataSchemaNode); } else if (dataSchemaNode instanceof ChoiceNode) { containers.addAll(((ChoiceNode) dataSchemaNode).getCases()); } } for (DataNodeContainer container : containers) { DataSchemaNode retVal = findChildNode(container.getChildNodes(), name); if (retVal != null) { return retVal; } } return null; } public static YangInstanceIdentifier.NodeIdentifier getNodeIdentifier( final String localName) { return new YangInstanceIdentifier.NodeIdentifier(QName.create( URI.create(NAMESPACE), revision, localName)); } public static YangInstanceIdentifier.AugmentationIdentifier getAugmentIdentifier( final String... 
childNames) { Set<QName> qn = Sets.newHashSet(); for (String childName : childNames) { qn.add(getNodeIdentifier(childName).getNodeType()); } return new YangInstanceIdentifier.AugmentationIdentifier(qn); } public static ContainerNode augmentChoiceExpectedNode() { DataContainerNodeBuilder<YangInstanceIdentifier.NodeIdentifier, ContainerNode> b = Builders.containerBuilder(); b.withNodeIdentifier(getNodeIdentifier("container")); b.withChild(Builders .choiceBuilder() .withNodeIdentifier(getNodeIdentifier("ch2")) .withChild( Builders.leafBuilder() .withNodeIdentifier(getNodeIdentifier("c2Leaf")).withValue("2") .build()) .withChild( Builders .choiceBuilder() .withNodeIdentifier(getNodeIdentifier("c2DeepChoice")) .withChild( Builders .leafBuilder() .withNodeIdentifier( getNodeIdentifier("c2DeepChoiceCase1Leaf2")) .withValue("2").build()).build()).build()); b.withChild(Builders .choiceBuilder() .withNodeIdentifier(getNodeIdentifier("ch3")) .withChild( Builders.leafBuilder() .withNodeIdentifier(getNodeIdentifier("c3Leaf")).withValue("3") .build()).build()); b.withChild(Builders .augmentationBuilder() .withNodeIdentifier(getAugmentIdentifier("augLeaf")) .withChild( Builders.leafBuilder() .withNodeIdentifier(getNodeIdentifier("augLeaf")) .withValue("augment").build()).build()); b.withChild(Builders .augmentationBuilder() .withNodeIdentifier(getAugmentIdentifier("ch")) .withChild( Builders .choiceBuilder() .withNodeIdentifier(getNodeIdentifier("ch")) .withChild( Builders.leafBuilder() .withNodeIdentifier(getNodeIdentifier("c1Leaf")) .withValue("1").build()) .withChild( Builders .augmentationBuilder() .withNodeIdentifier( getAugmentIdentifier("c1Leaf_AnotherAugment", "deepChoice")) .withChild( Builders .leafBuilder() .withNodeIdentifier( getNodeIdentifier("c1Leaf_AnotherAugment")) .withValue("1").build()) .withChild( Builders .choiceBuilder() .withNodeIdentifier( getNodeIdentifier("deepChoice")) .withChild( Builders .leafBuilder() .withNodeIdentifier( getNodeIdentifier("deepLeafc1")) .withValue("1").build()).build()) .build()).build()).build()); return b.build(); } public void init(final String yangPath, final String xmlPath, final ContainerNode expectedNode) throws Exception { SchemaContext schema = parseTestSchema(yangPath); this.xmlPath = xmlPath; this.containerNode = (ContainerSchemaNode) getSchemaNode(schema, "test", "container"); this.expectedNode = expectedNode; } SchemaContext parseTestSchema(final String yangPath) throws Exception { YangParserImpl yangParserImpl = new YangParserImpl(); InputStream stream = NormalizedNodeXmlConverterTest.class.getResourceAsStream(yangPath); ArrayList<InputStream> al = new ArrayList<InputStream>(); al.add(stream); Set<Module> modules = yangParserImpl.parseYangModelsFromStreams(al); return yangParserImpl.resolveSchemaContext(modules); } @Test public void testConversionWithAugmentChoice() throws Exception { init("/augment_choice.yang", "/augment_choice.xml", augmentChoiceExpectedNode()); Document doc = loadDocument(xmlPath); ContainerNode built = DomToNormalizedNodeParserFactory .getInstance(DomUtils.defaultValueCodecProvider()) .getContainerNodeParser() .parse(Collections.singletonList(doc.getDocumentElement()), containerNode); if (expectedNode != null) { junit.framework.Assert.assertEquals(expectedNode, built); } logger.info("{}", built); Iterable<Element> els = DomFromNormalizedNodeSerializerFactory .getInstance(XmlDocumentUtils.getDocument(), DomUtils.defaultValueCodecProvider()) .getContainerNodeSerializer().serialize(containerNode, built); Element el = 
els.iterator().next(); XMLUnit.setIgnoreWhitespace(true); XMLUnit.setIgnoreComments(true); System.out.println(toString(doc.getDocumentElement())); System.out.println(toString(el)); new Diff(XMLUnit.buildControlDocument(toString(doc.getDocumentElement())), XMLUnit.buildTestDocument(toString(el))).similar(); } private static ContainerNode listLeafListWithAttributes() { DataContainerNodeBuilder<YangInstanceIdentifier.NodeIdentifier, ContainerNode> b = Builders.containerBuilder(); b.withNodeIdentifier(getNodeIdentifier("container")); CollectionNodeBuilder<MapEntryNode, MapNode> listBuilder = Builders.mapBuilder().withNodeIdentifier(getNodeIdentifier("list")); Map<QName, Object> predicates = Maps.newHashMap(); predicates.put(getNodeIdentifier("uint32InList").getNodeType(), 3L); DataContainerNodeBuilder<YangInstanceIdentifier.NodeIdentifierWithPredicates, MapEntryNode> list1Builder = Builders.mapEntryBuilder().withNodeIdentifier( new YangInstanceIdentifier.NodeIdentifierWithPredicates( getNodeIdentifier("list").getNodeType(), predicates)); NormalizedNodeBuilder<YangInstanceIdentifier.NodeIdentifier, Object, LeafNode<Object>> uint32InListBuilder = Builders.leafBuilder().withNodeIdentifier( getNodeIdentifier("uint32InList")); list1Builder.withChild(uint32InListBuilder.withValue(3L).build()); listBuilder.withChild(list1Builder.build()); b.withChild(listBuilder.build()); NormalizedNodeBuilder<YangInstanceIdentifier.NodeIdentifier, Object, LeafNode<Object>> booleanBuilder = Builders.leafBuilder().withNodeIdentifier(getNodeIdentifier("boolean")); booleanBuilder.withValue(false); b.withChild(booleanBuilder.build()); ListNodeBuilder<Object, LeafSetEntryNode<Object>> leafListBuilder = Builders.leafSetBuilder().withNodeIdentifier( getNodeIdentifier("leafList")); NormalizedNodeBuilder<YangInstanceIdentifier.NodeWithValue, Object, LeafSetEntryNode<Object>> leafList1Builder = Builders.leafSetEntryBuilder().withNodeIdentifier( new YangInstanceIdentifier.NodeWithValue(getNodeIdentifier( "leafList").getNodeType(), "a")); leafList1Builder.withValue("a"); leafListBuilder.withChild(leafList1Builder.build()); b.withChild(leafListBuilder.build()); return b.build(); } @Test public void testConversionWithAttributes() throws Exception { init("/test.yang", "/simple_xml_with_attributes.xml", listLeafListWithAttributes()); Document doc = loadDocument(xmlPath); ContainerNode built = DomToNormalizedNodeParserFactory .getInstance(DomUtils.defaultValueCodecProvider()) .getContainerNodeParser() .parse(Collections.singletonList(doc.getDocumentElement()), containerNode); if (expectedNode != null) { junit.framework.Assert.assertEquals(expectedNode, built); } logger.info("{}", built); Iterable<Element> els = DomFromNormalizedNodeSerializerFactory .getInstance(XmlDocumentUtils.getDocument(), DomUtils.defaultValueCodecProvider()) .getContainerNodeSerializer().serialize(containerNode, built); Element el = els.iterator().next(); XMLUnit.setIgnoreWhitespace(true); XMLUnit.setIgnoreComments(true); System.out.println(toString(doc.getDocumentElement())); System.out.println(toString(el)); new Diff(XMLUnit.buildControlDocument(toString(doc.getDocumentElement())), XMLUnit.buildTestDocument(toString(el))).similar(); } private Document loadDocument(final String xmlPath) throws Exception { InputStream resourceAsStream = NormalizedNodeXmlConverterTest.class.getResourceAsStream(xmlPath); Document currentConfigElement = readXmlToDocument(resourceAsStream); Preconditions.checkNotNull(currentConfigElement); return currentConfigElement; } private 
static final DocumentBuilderFactory BUILDERFACTORY; static { DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance(); factory.setNamespaceAware(true); factory.setCoalescing(true); factory.setIgnoringElementContentWhitespace(true); factory.setIgnoringComments(true); BUILDERFACTORY = factory; } private Document readXmlToDocument(final InputStream xmlContent) throws IOException, SAXException { DocumentBuilder dBuilder; try { dBuilder = BUILDERFACTORY.newDocumentBuilder(); } catch (ParserConfigurationException e) { throw new RuntimeException("Failed to parse XML document", e); } Document doc = dBuilder.parse(xmlContent); doc.getDocumentElement().normalize(); return doc; } public static String toString(final Element xml) { try { Transformer transformer = TransformerFactory.newInstance().newTransformer(); transformer.setOutputProperty(OutputKeys.INDENT, "yes"); StreamResult result = new StreamResult(new StringWriter()); DOMSource source = new DOMSource(xml); transformer.transform(source, result); return result.getWriter().toString(); } catch (IllegalArgumentException | TransformerFactoryConfigurationError | TransformerException e) { throw new RuntimeException("Unable to serialize xml element " + xml, e); } } @Test public void testConversionToNormalizedXml() throws Exception { SimpleNormalizedNodeMessage.NormalizedNodeXml nnXml = EncoderDecoderUtil.encode(parseTestSchema("/augment_choice.yang"), augmentChoiceExpectedNode()); Document expectedDoc = loadDocument("/augment_choice.xml"); Document convertedDoc = EncoderDecoderUtil.factory.newDocumentBuilder().parse( new ByteArrayInputStream(nnXml.getXmlString().getBytes("utf-8"))); System.out.println(toString(convertedDoc.getDocumentElement())); XMLUnit.setIgnoreWhitespace(true); XMLUnit.setIgnoreComments(true); new Diff(XMLUnit.buildControlDocument(toString(expectedDoc .getDocumentElement())), XMLUnit.buildTestDocument(toString(convertedDoc.getDocumentElement()))) .similar(); System.out.println(toString(expectedDoc.getDocumentElement())); } @Test public void testConversionFromXmlToNormalizedNode() throws Exception { SimpleNormalizedNodeMessage.NormalizedNodeXml nnXml = EncoderDecoderUtil.encode(parseTestSchema("/test.yang"), listLeafListWithAttributes()); Document expectedDoc = loadDocument("/simple_xml_with_attributes.xml"); Document convertedDoc = EncoderDecoderUtil.factory.newDocumentBuilder().parse( new ByteArrayInputStream(nnXml.getXmlString().getBytes("utf-8"))); System.out.println(toString(convertedDoc.getDocumentElement())); XMLUnit.setIgnoreWhitespace(true); XMLUnit.setIgnoreComments(true); new Diff(XMLUnit.buildControlDocument(toString(expectedDoc .getDocumentElement())), XMLUnit.buildTestDocument(toString(convertedDoc.getDocumentElement()))) .similar(); System.out.println(toString(expectedDoc.getDocumentElement())); // now we will try to convert xml back to normalize node. ContainerNode cn = (ContainerNode) EncoderDecoderUtil.decode( parseTestSchema("/test.yang"), nnXml); junit.framework.Assert.assertEquals(listLeafListWithAttributes(), cn); } }
def reset_rule_params():
    bmtr._rule_params = copy.copy(rule_params_backup)
Server Deployments

Week 4 Recap

As always, please refer to the deployment thread in the forums for the very latest news.

On Tuesday January 21st, the Main channel received the server maintenance project previously on the three RC channels, which includes the forced delay in llLoadURL being reduced to 0.1 seconds and the new LSL functions for uniformly scaling linksets (integer llScaleByFactor(float scaling_factor), float llGetMinScaleFactor(), float llGetMaxScaleFactor()). The package also includes server-side support for STORM-68 and STORM-1831, both of which are awaiting viewer-side releases.

On Wednesday January 22nd, all three RC channels received a new server maintenance project, which contains a single fix for a crash mode.

Server Deployments in Week 5

Details on the deployments for week 5 (commencing Monday January 27th) have yet to be finalised. However, it appears there will be a new server maintenance project targeted at the RC channels, which Maestro Linden outlined during the Server Beta meeting on Thursday January 23rd:

We’ll have another small maintenance project going out next week which includes another crash fix and a fix to llModifyLand(), [where] the bug is that calling it in a child prim modifies the wrong land. For example, if a child prim is offset by <8,4,0> from the root prim, then calling that function in the child prim will try to modify the terrain at <8,4,0> of the region, which may or may not work depending on who owns the parcel. The fix is to make it modify the land underneath the child prim (which of course follows the same permissions rules – you can only modify land owned by the script owner).

(A small sketch of the coordinate logic behind this fix appears at the end of this post.) Unless another project pops up in the interim, it is likely this update will be deployed to all three RC channels in week 5.

SL Viewer Updates

A new Maintenance RC viewer appeared in the release channel on Thursday January 23rd. Version 3.6.14.285499 includes some 43 MAINT-related fixes and updates, as listed in the release notes. All other RC and project viewers, and the release viewer, remain unchanged, per my notes in part one of this week’s report.

HTTP Work – Monty Linden

Monty attended the Server Beta meeting to provide some more information on the HTTP project work. “Basically, my hope is to move http operations to a domain where ping time has far less impact on experience as well as just doing HTTP better,” he said of the current work, the benefits of which are currently in an RC viewer – version 3.6.14.285253. “And that is happening.” (See Monty’s blog post on the subject.)

HTTP Pipelining

This work was slightly sidetracked as Monty got involved in issues around third-party libraries (see below). However, pipelining is seen as the first major step that will give the Lab some ping time independence, and it is likely to involve some server-side work. “The server work will be small, a change in fairness policies,” Monty stated by way of a broad explanation, “and throttle implementation but that isn’t set in stone yet.”

Third-party Library Work

The work on the third-party libraries has been covered in a number of recent HTTP project updates. These are the libraries used in building the SL viewer, including zlib, libpng, openssl, ares, libcurl, boost and SDL, all of which Monty has been rebuilding, as well as “tweaking” colladadom, openjpeg, Google-mock and llqtwebkit. The aim is to ensure that these libraries are up-to-date, and are properly managed, maintained and correctly used throughout the viewer build process.
This work should help, longer-term, with any move by the Lab to 64-bit viewer builds, and should be of benefit to those TPVs already building 64-bit variants of their viewers.

Webkit Woes

Webkit is a third-party library used within the viewer for a number of tasks. For example, it powers the built-in web browser, and is used to display profiles (unless you’re using a viewer supporting legacy profiles). It is also used by features like Media on a Prim (MOAP) and many in-world televisions. There have been an increasing number of issues with Webkit. The libraries used within SL are out-of-date, for example, something which has caused the Lab and TPVs a considerable amount of pain. More recently, users have been encountering issues when trying to view YouTube videos via the built-in browser or MOAP, reporting that they are seeing an error message informing them that “You’re using Safari browser on Windows that we’ll soon stop supporting” (BUG-4763 and FIRE-12642), or reporting they have sound but no video (FIRE-11057). An Adobe engineer has commented on the latter Firestorm JIRA, explaining the problem, and has indicated he has contacted Linden Lab as well. It’s unclear how this matter will be handled going forward. While Monty has prodded at Webkit as a part of his additional work on third-party libraries, an overall fix may not be that straightforward. As such, it appears the way forward in dealing with the video issues is currently unclear.
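Finally, as promised above, here is a small illustrative sketch of the coordinate logic behind the llModifyLand() fix. This is plain Python, not LSL or actual simulator code; all names and values are hypothetical (and root rotation is ignored for simplicity). It simply contrasts the buggy behaviour, which treated the child prim's local offset as a region coordinate, with the fixed behaviour, which modifies the land under the prim's actual position:

# Illustrative sketch only - not LSL or simulator code.

def terrain_target(root_pos, child_offset, fixed=True):
    # Return the region coordinate whose terrain would be modified.
    if fixed:
        # Fixed behaviour: the land underneath the child prim itself.
        return tuple(r + o for r, o in zip(root_pos, child_offset))
    # Buggy behaviour: the raw offset is treated as a region position.
    return child_offset

root = (120.0, 96.0, 0.0)   # hypothetical root prim position
offset = (8.0, 4.0, 0.0)    # child prim offset from the root, per the example

print(terrain_target(root, offset, fixed=False))  # (8.0, 4.0, 0.0) - wrong parcel
print(terrain_target(root, offset, fixed=True))   # (128.0, 100.0, 0.0) - under the prim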
#include <acknex.h>
#include <default.c>
#include "..\\Source\\tust.h"

#define POINT_COUNT 5

VECTOR pt[POINT_COUNT];

void draw_mark(VECTOR *pos, COLOR *col)
{
   draw_line(vector(pos.x - 1, pos.y, 0), NULL, 100);
   draw_line(vector(pos.x - 1, pos.y, 0), col, 100);
   draw_line(vector(pos.x + 2, pos.y, 0), col, 100);
   draw_line(vector(pos.x, pos.y - 1, 0), NULL, 100);
   draw_line(vector(pos.x, pos.y - 1, 0), col, 100);
   draw_line(vector(pos.x, pos.y + 2, 0), col, 100);
}

function on_space_event()
{
   int i;
   for(i = 0; i < POINT_COUNT; i++)
   {
      vec_set(&pt[i], vector(random(screen_size.x), random(screen_size.y), 0));
   }
}

function main()
{
   wait(1);
   int i;
   for(i = 0; i < POINT_COUNT; i++)
   {
      vec_set(&pt[i], vector(random(screen_size.x), random(screen_size.y), 0));
   }
   while(1)
   {
      draw_line(&pt[0], NULL, 100);
      for(i = 1; i < POINT_COUNT; i++)
      {
         draw_line(&pt[i], COLOR_BLUE, 100);
      }
      float f;
      draw_line(&pt[0], NULL, 100);
      for(f = 0; f <= 1; f += 0.01)
      {
         draw_line(vec_to_bezier(NULL, pt, POINT_COUNT, f), COLOR_RED, 100);
      }
      wait(1);
   }
}
/*
 Removing messages was just an exercise in learning Kafka - this will delete
 half of the messages if a certain limit is reached (assuming the offset
 increments by one). The proper way to do this is with 'log.retention.bytes'
 and 'log.retention.(hours/minutes/ms)' in the server.properties. And, if
 needed, 'kafka-configs.sh --alter' at runtime.
*/
private void removeMessages(String topic, long offset, int partition) {
    Properties props = new Properties();
    props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class.getName());
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "dodex");

    Long totalCount = 0l;

    try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
        List<TopicPartition> ll = new LinkedList<>();
        ll.add(new TopicPartition(topic, partition));
        consumer.assign(ll);

        Set<TopicPartition> assignment;
        while ((assignment = consumer.assignment()).isEmpty()) {
            consumer.poll(Duration.ofMillis(500));
        }

        final Map<TopicPartition, Long> endOffsets = consumer.endOffsets(assignment);
        final Map<TopicPartition, Long> beginningOffsets = consumer.beginningOffsets(assignment);
        assert endOffsets.size() == beginningOffsets.size();
        assert endOffsets.keySet().equals(beginningOffsets.keySet());

        // Total number of messages currently in the partition.
        totalCount = beginningOffsets.entrySet().stream().mapToLong(entry -> {
            TopicPartition tp = entry.getKey();
            Long beginningOffset = entry.getValue();
            Long endOffset = endOffsets.get(tp);
            return endOffset - beginningOffset;
        }).sum();

        beginningOffsets.clear();
        consumer.close(Duration.ofMillis(500));
    }

    if (totalCount > KafkaEmitterDodex.getMessageLimit()) {
        long toDelete = offset - totalCount / 2;
        TopicPartition tp = new TopicPartition(topic, partition);
        RecordsToDelete rtd = RecordsToDelete.beforeOffset(toDelete);
        Map<TopicPartition, RecordsToDelete> deleteRecords = new ConcurrentHashMap<>();
        deleteRecords.put(tp, rtd);

        Properties config = new Properties();
        config.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        try {
            AdminClient ac = AdminClient.create(config);
            DeleteRecordsResult dr = ac.deleteRecords(deleteRecords);
            dr.all().get(1l, TimeUnit.SECONDS);
            logger.info("Approximate records deleted: {}", totalCount / 2);
            ac.close();
        } catch (InterruptedException | TimeoutException e) {
            e.printStackTrace();
        } catch (ExecutionException e) {
            logger.error("Delete Messages Exception: {}", e.getMessage());
        }
    }
}
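For reference, the retention-based alternative mentioned in the comment above would look something like the following. The values and the topic name are placeholders, and exact behaviour depends on your Kafka version:

# server.properties (broker-wide defaults) - placeholder values
log.retention.hours=168
log.retention.bytes=1073741824

# Per-topic override at runtime (topic name is a placeholder)
kafka-configs.sh --bootstrap-server localhost:9092 \
  --entity-type topics --entity-name dodex-events \
  --alter --add-config retention.ms=86400000,retention.bytes=268435456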
/* Handle the MRI IRP and IRPC pseudo-ops.  */

void
s_irp (int irpc)
{
  char *eol;
  const char *file;
  unsigned int line;
  sb s;
  const char *err;
  sb out;

  file = as_where (&line);

  eol = find_end_of_line (input_line_pointer, 0);
  sb_build (&s, eol - input_line_pointer);
  sb_add_buffer (&s, input_line_pointer, eol - input_line_pointer);
  input_line_pointer = eol;

  sb_new (&out);

  err = expand_irp (irpc, 0, &s, &out, get_non_macro_line_sb);
  if (err != NULL)
    as_bad_where (file, line, "%s", err);

  sb_kill (&s);

  input_scrub_include_sb (&out, input_line_pointer, 1);
  sb_kill (&out);
  buffer_limit = input_scrub_next_buffer (&input_line_pointer);
}
<filename>openraft/src/core/client.rs use std::sync::Arc; use anyhow::anyhow; use futures::future::TryFutureExt; use futures::stream::FuturesUnordered; use futures::stream::StreamExt; use maplit::btreeset; use tokio::time::timeout; use tokio::time::Duration; use tracing::Instrument; use crate::core::apply_to_state_machine; use crate::core::LeaderState; use crate::core::State; use crate::error::ClientReadError; use crate::error::ClientWriteError; use crate::error::QuorumNotEnough; use crate::raft::AppendEntriesRequest; use crate::raft::ClientWriteRequest; use crate::raft::ClientWriteResponse; use crate::raft::Entry; use crate::raft::EntryPayload; use crate::raft::RaftRespTx; use crate::replication::RaftEvent; use crate::AppData; use crate::AppDataResponse; use crate::MessageSummary; use crate::RaftNetwork; use crate::RaftStorage; use crate::StorageError; /// A wrapper around a ClientRequest which has been transformed into an Entry, along with its response channel. pub(super) struct ClientRequestEntry<D: AppData, R: AppDataResponse> { /// The Arc'd entry of the ClientRequest. /// /// This value is Arc'd so that it may be sent across thread boundaries for replication /// without having to clone the data payload itself. pub entry: Arc<Entry<D>>, /// The response channel for the request. pub tx: Option<RaftRespTx<ClientWriteResponse<R>, ClientWriteError>>, } impl<D: AppData, R: AppDataResponse> MessageSummary for ClientRequestEntry<D, R> { fn summary(&self) -> String { format!("entry:{}", self.entry.summary()) } } impl<'a, D: AppData, R: AppDataResponse, N: RaftNetwork<D>, S: RaftStorage<D, R>> LeaderState<'a, D, R, N, S> { /// Commit the initial entry which new leaders are obligated to create when first coming to power, per §8. #[tracing::instrument(level = "trace", skip(self))] pub(super) async fn commit_initial_leader_entry(&mut self) -> Result<(), StorageError> { let entry = self.core.append_payload_to_log(EntryPayload::Blank).await?; self.leader_report_metrics(); let cr_entry = ClientRequestEntry { entry: Arc::new(entry), tx: None, }; self.replicate_client_request(cr_entry).await?; Ok(()) } /// Handle client read requests. /// /// Spawn requests to all members of the cluster, include members being added in joint /// consensus. Each request will have a timeout, and we respond once we have a majority /// agreement from each config group. Most of the time, we will have a single uniform /// config group. /// /// From the spec (§8): /// Second, a leader must check whether it has been deposed before processing a read-only /// request (its information may be stale if a more recent leader has been elected). Raft /// handles this by having the leader exchange heartbeat messages with a majority of the /// cluster before responding to read-only requests. #[tracing::instrument(level = "trace", skip(self, tx))] pub(super) async fn handle_client_read_request(&mut self, tx: RaftRespTx<(), ClientReadError>) { // Setup sentinel values to track when we've received majority confirmation of leadership. let mem = &self.core.effective_membership.membership; let mut granted = btreeset! {self.core.id}; if mem.is_majority(&granted) { let _ = tx.send(Ok(())); return; } // Spawn parallel requests, all with the standard timeout for heartbeats. 
let mut pending = FuturesUnordered::new(); let membership = &self.core.effective_membership.membership; for (id, node) in self.nodes.iter() { if !membership.is_member(id) { continue; } let rpc = AppendEntriesRequest { term: self.core.current_term, leader_id: self.core.id, prev_log_id: node.matched, entries: vec![], leader_commit: self.core.committed, }; let target = *id; let network = self.core.network.clone(); let ttl = Duration::from_millis(self.core.config.heartbeat_interval); let task = tokio::spawn( async move { match timeout(ttl, network.send_append_entries(target, rpc)).await { Ok(Ok(data)) => Ok((target, data)), Ok(Err(err)) => Err((target, err)), Err(_timeout) => Err((target, anyhow!("timeout waiting for leadership confirmation"))), } } .instrument(tracing::debug_span!("spawn")), ) .map_err(move |err| (*id, err)); pending.push(task); } // Handle responses as they return. while let Some(res) = pending.next().await { let (target, data) = match res { Ok(Ok(res)) => res, Ok(Err((target, err))) => { tracing::error!(target, error=%err, "timeout while confirming leadership for read request"); continue; } Err((target, err)) => { tracing::error!(target, "{}", err); continue; } }; // If we receive a response with a greater term, then revert to follower and abort this request. if data.term != self.core.current_term { self.core.update_current_term(data.term, None); // TODO(xp): if receives error about a higher term, it should stop at once? self.core.set_target_state(State::Follower); } granted.insert(target); let mem = &self.core.effective_membership.membership; if mem.is_majority(&granted) { let _ = tx.send(Ok(())); return; } } // If we've hit this location, then we've failed to gather needed confirmations due to // request failures. let _ = tx.send(Err(QuorumNotEnough { cluster: self.core.effective_membership.membership.summary(), got: granted, } .into())); } /// Handle client write requests. #[tracing::instrument(level = "trace", skip(self, tx), fields(rpc=%rpc.summary()))] pub(super) async fn handle_client_write_request( &mut self, rpc: ClientWriteRequest<D>, tx: RaftRespTx<ClientWriteResponse<R>, ClientWriteError>, ) -> Result<(), StorageError> { let entry = self.core.append_payload_to_log(rpc.payload).await?; let entry = ClientRequestEntry { entry: Arc::new(entry), tx: Some(tx), }; self.leader_report_metrics(); self.replicate_client_request(entry).await?; Ok(()) } /// Begin the process of replicating the given client request. /// /// NOTE WELL: this routine does not wait for the request to actually finish replication, it /// merely beings the process. Once the request is committed to the cluster, its response will /// be generated asynchronously. #[tracing::instrument(level = "debug", skip(self, req), fields(req=%req.summary()))] pub(super) async fn replicate_client_request(&mut self, req: ClientRequestEntry<D, R>) -> Result<(), StorageError> { // Replicate the request if there are other cluster members. The client response will be // returned elsewhere after the entry has been committed to the cluster. let log_id = req.entry.log_id; let quorum_granted = self.core.effective_membership.membership.is_majority(&btreeset! 
{self.core.id}); if quorum_granted { assert!(self.core.committed < Some(log_id)); self.core.committed = Some(log_id); tracing::debug!(?self.core.committed, "update committed, no need to replicate"); self.leader_report_metrics(); self.client_request_post_commit(req).await?; } else { self.awaiting_committed.push(req); } for node in self.nodes.values() { let _ = node.repl_stream.repl_tx.send(( RaftEvent::Replicate { appended: log_id, committed: self.core.committed, }, tracing::debug_span!("CH"), )); } Ok(()) } /// Handle the post-commit logic for a client request. #[tracing::instrument(level = "debug", skip(self, req))] pub(super) async fn client_request_post_commit( &mut self, req: ClientRequestEntry<D, R>, ) -> Result<(), StorageError> { let entry = &req.entry; let apply_res = self.apply_entry_to_state_machine(entry).await?; self.send_response(entry, apply_res, req.tx).await; // Trigger log compaction if needed. self.core.trigger_log_compaction_if_needed(false); Ok(()) } #[tracing::instrument(level = "debug", skip(self, entry, resp, tx), fields(entry=%entry.summary()))] pub(super) async fn send_response( &mut self, entry: &Entry<D>, resp: R, tx: Option<RaftRespTx<ClientWriteResponse<R>, ClientWriteError>>, ) { let tx = match tx { None => return, Some(x) => x, }; let membership = if let EntryPayload::Membership(ref c) = entry.payload { Some(c.clone()) } else { None }; let res = Ok(ClientWriteResponse { log_id: entry.log_id, data: resp, membership, }); let send_res = tx.send(res); tracing::debug!( "send client response through tx, send_res is error: {}", send_res.is_err() ); } pub fn handle_special_log(&mut self, entry: &Entry<D>) { match &entry.payload { EntryPayload::Membership(ref m) => { if m.is_in_joint_consensus() { // nothing to do } else { self.handle_uniform_consensus_committed(&entry.log_id); } } EntryPayload::Blank => {} EntryPayload::Normal(_) => {} } } /// Apply the given log entry to the state machine. #[tracing::instrument(level = "debug", skip(self, entry))] pub(super) async fn apply_entry_to_state_machine(&mut self, entry: &Entry<D>) -> Result<R, StorageError> { self.handle_special_log(entry); // First, we just ensure that we apply any outstanding up to, but not including, the index // of the given entry. We need to be able to return the data response from applying this // entry to the state machine. // // Note that this would only ever happen if a node had unapplied logs from before becoming leader. let log_id = &entry.log_id; let index = log_id.index; let expected_next_index = match self.core.last_applied { None => 0, Some(log_id) => log_id.index + 1, }; if index != expected_next_index { let entries = self.core.storage.get_log_entries(expected_next_index..index).await?; if let Some(entry) = entries.last() { self.core.last_applied = Some(entry.log_id); } let data_entries: Vec<_> = entries.iter().collect(); if !data_entries.is_empty() { apply_to_state_machine( self.core.storage.clone(), &data_entries, self.core.config.max_applied_log_to_keep, ) .await?; } } // Apply this entry to the state machine and return its data response. let apply_res = apply_to_state_machine( self.core.storage.clone(), &[entry], self.core.config.max_applied_log_to_keep, ) .await?; // TODO(xp): deal with partial apply. self.core.last_applied = Some(*log_id); self.leader_report_metrics(); // TODO(xp) merge this function to replication_to_state_machine? Ok(apply_res.into_iter().next().unwrap()) } }
/**
 * The AbstractModel is an abstract Model providing caching for the identities of
 * players, items and item prototypes.
 *
 * @author Jan-Willem Gmelig Meyling
 * @author Thomas Gmelig Meyling
 */
@Root
public abstract class AbstractModel implements Model {

	/** GameServer ticks */
	protected final GameServerTicks gameServerTicks = new GameServerTicks();

	/** Identities mapped to players for auth -> game server redirection */
	protected final Map<Long, Player> players = Collections.synchronizedMap(new HashMap<Long, Player>());

	/** Cached item prototypes for used items */
	protected final Map<Long, ItemPrototype> itemPrototypes = Collections.synchronizedMap(new HashMap<Long, ItemPrototype>());

	/** Cached item instances for used items */
	protected final Map<Long, ItemInstance> itemInstances = Collections.synchronizedMap(new HashMap<Long, ItemInstance>());

	/** AuthorizationPromises */
	protected final Map<Long, AuthorizationPromise> authPromises = Collections.synchronizedMap(new HashMap<Long, AuthorizationPromise>());

	protected final List<Guild> guilds = new CopyOnWriteArrayList<Guild>();

	/**
	 * AtomicLong rather than a volatile long: volatile only guarantees
	 * visibility, not an atomic read-modify-write, so the previous
	 * INCREMENTING_IDENTITY++ could hand out duplicate identities under
	 * concurrent access.
	 */
	private final java.util.concurrent.atomic.AtomicLong incrementingIdentity =
			new java.util.concurrent.atomic.AtomicLong();

	/**
	 * @return a player identity between 1000000 and 1999999999
	 */
	public long createPlayerIdentity() {
		return (incrementingIdentity.getAndIncrement() % 1999000000L) + 1000000L;
	}

	@Override
	public ItemPrototype getItemPrototype(long staticID) throws AccessException {
		ItemPrototype proto = itemPrototypes.get(staticID);
		if (proto == null) {
			proto = fetchItemPrototype(staticID);
			itemPrototypes.put(staticID, proto);
		}
		return proto;
	}

	@Override
	public EquipmentPrototype getEquipmentPrototype(long staticID) throws AccessException {
		return (EquipmentPrototype) getItemPrototype(staticID);
	}

	@Override
	public ItemInstance getItemInstance(long id) throws AccessException {
		ItemInstance it = itemInstances.get(id);
		if (it == null) {
			it = fetchItemInstance(id);
			itemInstances.put(id, it); // cache the fetched instance, mirroring getItemPrototype
		}
		return it;
	}

	@Override
	public EquipmentInstance getEquipmentInstance(long id) throws AccessException {
		return (EquipmentInstance) getItemInstance(id);
	}

	@Override
	public AuthorizationPromise getAuthorizationPromise(Long identity) {
		return authPromises.remove(identity);
	}

	@Override
	public Player getPlayer(Long id) {
		return players.get(id);
	}

	/**
	 * @return the players
	 */
	public Map<Long, Player> getPlayers() {
		return players;
	}

	public List<Guild> getGuilds() {
		return guilds;
	}

	@Override
	public Player loadPlayer(AuthorizationPromise promise) throws AccessException {
		Player hero = fetchPlayer(promise);
		fetchInventory(hero);
		fetchEquipment(hero);
		fetchSkill(hero);
		fetchProficiency(hero);
		players.put(hero.getIdentity(), hero);
		return hero;
	}

	/**
	 * Used to load a Player instance from the model
	 * @param promise
	 * @return a Player instance based on the data in the model
	 * @throws AccessException
	 */
	protected abstract Player fetchPlayer(AuthorizationPromise promise) throws AccessException;

	/**
	 * Used to load the inventory content for a Player from the model
	 * @param hero
	 * @throws AccessException
	 */
	protected abstract void fetchInventory(Player hero) throws AccessException;

	/**
	 * Used to load the equipment for a Player from the model
	 * @param hero
	 * @throws AccessException
	 */
	protected abstract void fetchEquipment(Player hero) throws AccessException;

	/**
	 * Used to load the skills for a Player from the model
	 * @param hero
	 * @throws AccessException
	 */
	protected abstract void fetchSkill(Player hero) throws AccessException;

	/**
	 * Used to load the Weapon Proficiencies for a Player from the model
	 * @param hero
	 * @throws AccessException
	 */
	protected abstract void fetchProficiency(Player hero) throws AccessException;

	/**
	 * Fetch an ItemPrototype from the data model when it is not present in
	 * memory
	 *
	 * @param id
	 *            for the ItemPrototype
	 * @return {@code ItemPrototype}
	 * @throws AccessException
	 */
	protected abstract ItemPrototype fetchItemPrototype(long id) throws AccessException;

	/**
	 * Fetch an ItemInstance from the data model when it is not present in memory
	 * @param id
	 * @return ItemInstance
	 * @throws AccessException
	 */
	protected abstract ItemInstance fetchItemInstance(long id) throws AccessException;

	protected abstract void fetchNPCs() throws AccessException;

	@Override
	public GameServerTicks getGameServerTicks() {
		return gameServerTicks;
	}
}
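/*
 * Illustration only, not part of the original source: a minimal stub subclass
 * showing what a concrete Model must implement before the caching in
 * getItemPrototype()/getItemInstance() becomes usable. The StubModel name is
 * hypothetical, every fetch* method simply fails with an unchecked exception
 * (so no assumption is made about AccessException's constructors), and it is
 * assumed the abstract methods shown above are the only ones a subclass still
 * has to provide.
 */
class StubModel extends AbstractModel {

	@Override
	protected Player fetchPlayer(AuthorizationPromise promise) throws AccessException {
		throw new UnsupportedOperationException("not backed by a data store");
	}

	@Override
	protected void fetchInventory(Player hero) throws AccessException { /* no-op */ }

	@Override
	protected void fetchEquipment(Player hero) throws AccessException { /* no-op */ }

	@Override
	protected void fetchSkill(Player hero) throws AccessException { /* no-op */ }

	@Override
	protected void fetchProficiency(Player hero) throws AccessException { /* no-op */ }

	@Override
	protected ItemPrototype fetchItemPrototype(long id) throws AccessException {
		// a real subclass would query its database here; getItemPrototype()
		// then memoizes whatever this returns
		throw new UnsupportedOperationException("unknown prototype " + id);
	}

	@Override
	protected ItemInstance fetchItemInstance(long id) throws AccessException {
		throw new UnsupportedOperationException("unknown instance " + id);
	}

	@Override
	protected void fetchNPCs() throws AccessException { /* no-op */ }
}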
/** * This class is the action form for all lookups. * * @deprecated Use {@link org.kuali.rice.krad.lookup.LookupForm}. */ @Deprecated public class LookupForm extends KualiForm { private static final long serialVersionUID = 1L; private static final org.apache.log4j.Logger LOG = org.apache.log4j.Logger.getLogger(LookupForm.class); protected static final String HEADER_BAR_ENABLED_PARAM = "headerBarEnabled"; protected static final String SEARCH_CRITERIA_ENABLED_PARAM = "searchCriteriaEnabled"; private String formKey; private Map<String, String> fields; private Map<String, String> fieldsForLookup; private String lookupableImplServiceName; private String conversionFields; private Map<String, String> fieldConversions; private String businessObjectClassName; private Lookupable lookupable; private boolean hideReturnLink = false; private boolean suppressActions = false; private boolean multipleValues = false; private String lookupAnchor; private String readOnlyFields; private List readOnlyFieldsList; private String referencesToRefresh; private boolean searchUsingOnlyPrimaryKeyValues; private String primaryKeyFieldLabels; private boolean showMaintenanceLinks = false; private String docNum; private String htmlDataType; private String lookupObjectId; private boolean lookupCriteriaEnabled = true; private boolean supplementalActionsEnabled = false; private boolean actionUrlsExist = false; private boolean ddExtraButton = false; private boolean headerBarEnabled = true; private boolean disableSearchButtons = false; private String isAdvancedSearch; /** * @see KualiForm#addRequiredNonEditableProperties() */ public void addRequiredNonEditableProperties(){ super.addRequiredNonEditableProperties(); registerRequiredNonEditableProperty(KRADConstants.LOOKUPABLE_IMPL_ATTRIBUTE_NAME); registerRequiredNonEditableProperty(KRADConstants.BUSINESS_OBJECT_CLASS_ATTRIBUTE); registerRequiredNonEditableProperty(KRADConstants.DISPATCH_REQUEST_PARAMETER); registerRequiredNonEditableProperty(KRADConstants.DOC_FORM_KEY); registerRequiredNonEditableProperty(KRADConstants.REFRESH_CALLER); registerRequiredNonEditableProperty(KRADConstants.DOC_NUM); registerRequiredNonEditableProperty(KRADConstants.REFERENCES_TO_REFRESH); registerRequiredNonEditableProperty(KRADConstants.FORM_KEY); registerRequiredNonEditableProperty(KRADConstants.CONVERSION_FIELDS_PARAMETER); registerRequiredNonEditableProperty(KRADConstants.FIELDS_CONVERSION_PARAMETER); registerRequiredNonEditableProperty(KRADConstants.HIDE_LOOKUP_RETURN_LINK); registerRequiredNonEditableProperty(KRADConstants.MULTIPLE_VALUE); registerRequiredNonEditableProperty(KRADConstants.BACK_LOCATION); registerRequiredNonEditableProperty(KRADConstants.LOOKUP_ANCHOR); registerRequiredNonEditableProperty("searchUsingOnlyPrimaryKeyValues"); registerRequiredNonEditableProperty(KRADConstants.MULTIPLE_VALUE_LOOKUP_PREVIOUSLY_SELECTED_OBJ_IDS_PARAM); registerRequiredNonEditableProperty(KRADConstants.TableRenderConstants.VIEWED_PAGE_NUMBER); } /** * @return the htmlDataType */ public String getHtmlDataType() { return this.htmlDataType; } /** * @param htmlDataType the htmlDataType to set */ public void setHtmlDataType(String htmlDataType) { this.htmlDataType = htmlDataType; } /** * @return the docNum */ public String getDocNum() { return this.docNum; } /** * @param docNum the docNum to set */ public void setDocNum(String docNum) { this.docNum = docNum; } /** * Whether the results contain at least one row that is returnable. 
 */
    private boolean hasReturnableRow;

    // used for internal purposes in populate
    private Map requestParameters;

    /**
     * Stores the incoming request parameters so that they can be passed to the Lookupable implementation.
     */
    @Override
    public void postprocessRequestParameters(Map requestParameters) {
        this.requestParameters = requestParameters;
        super.postprocessRequestParameters(requestParameters);
    }

    /**
     * Picks out the business object name from the request in order to retrieve a Lookupable and set properties.
     */
    public void populate(HttpServletRequest request) {
        super.populate(request);

        DataDictionaryService ddService = KRADServiceLocatorWeb.getDataDictionaryService();

        try {
            Lookupable localLookupable = null;
            if (StringUtils.isBlank(getParameter(request, KRADConstants.LOOKUPABLE_IMPL_ATTRIBUTE_NAME))
                    && StringUtils.isBlank(getLookupableImplServiceName())) {
                // get the business object class for the lookup
                String localBusinessObjectClassName = getParameter(request, KRADConstants.BUSINESS_OBJECT_CLASS_ATTRIBUTE);
                if (ExternalizableBusinessObjectUtils.isExternalizableBusinessObjectInterface(localBusinessObjectClassName)) {
                    Class localBusinessObjectClass = Class.forName(localBusinessObjectClassName);
                    localBusinessObjectClassName = KRADServiceLocatorWeb.getKualiModuleService().getResponsibleModuleService(localBusinessObjectClass).getExternalizableBusinessObjectImplementation(localBusinessObjectClass).getName();
                }
                setBusinessObjectClassName(localBusinessObjectClassName);

                if (StringUtils.isBlank(localBusinessObjectClassName)) {
                    LOG.error("Business object class not passed to lookup.");
                    throw new RuntimeException("Business object class not passed to lookup.");
                }

                // call data dictionary service to get lookup impl for bo class
                String lookupImplID = KNSServiceLocator.getBusinessObjectDictionaryService().getLookupableID(Class.forName(localBusinessObjectClassName));
                if (lookupImplID == null) {
                    lookupImplID = "kualiLookupable";
                }

                setLookupableImplServiceName(lookupImplID);
            }
            localLookupable = this.getLookupable(getLookupableImplServiceName());

            if (localLookupable == null) {
                LOG.error("Lookup impl not found for lookup impl name " + getLookupableImplServiceName());
                throw new RuntimeException("Lookup impl not found for lookup impl name " + getLookupableImplServiceName());
            }

            // set parameters on lookupable, add Advanced Search parameter
            Map<String, String[]> parameters = new TreeMap<String, String[]>();
            parameters.putAll(requestParameters);
            parameters.put(KRADConstants.ADVANCED_SEARCH_FIELD, new String[]{isAdvancedSearch});
            localLookupable.setParameters(parameters);
            requestParameters = null;

            if (getParameter(request, KRADConstants.LOOKUPABLE_IMPL_ATTRIBUTE_NAME) != null) {
                setLookupableImplServiceName(getParameter(request, KRADConstants.LOOKUPABLE_IMPL_ATTRIBUTE_NAME));
            }

            // check the doc form key is empty before setting so we don't override a restored lookup form
            if (request.getAttribute(KRADConstants.DOC_FORM_KEY) != null && StringUtils.isBlank(this.getFormKey())) {
                setFormKey((String) request.getAttribute(KRADConstants.DOC_FORM_KEY));
            } else if (getParameter(request, KRADConstants.DOC_FORM_KEY) != null && StringUtils.isBlank(this.getFormKey())) {
                setFormKey(getParameter(request, KRADConstants.DOC_FORM_KEY));
            }

            if (getParameter(request, KRADConstants.DOC_NUM) != null) {
                setDocNum(getParameter(request, KRADConstants.DOC_NUM));
            }

            String
returnLocation = getParameter(request, "returnLocation");
            if (StringUtils.isNotBlank(returnLocation)) {
                setBackLocation(returnLocation);
                localLookupable.getLookupableHelperService().setBackLocation(returnLocation);
            }

            if (getParameter(request, "conversionFields") != null) {
                setConversionFields(getParameter(request, "conversionFields"));
            }
            if (getParameter(request, KRADConstants.EXTRA_BUTTON_SOURCE) != null) {
                // these are not sourced from the DD/Lookupable
                ddExtraButton = false;
                setExtraButtonSource(getParameter(request, KRADConstants.EXTRA_BUTTON_SOURCE));
            }
            if (getParameter(request, KRADConstants.EXTRA_BUTTON_PARAMS) != null) {
                setExtraButtonParams(getParameter(request, KRADConstants.EXTRA_BUTTON_PARAMS));
            }
            String value = getParameter(request, "multipleValues");
            if (value != null) {
                if ("YES".equalsIgnoreCase(value)) {
                    setMultipleValues(true);
                } else {
                    // avoids the deprecated new Boolean(...) constructor
                    setMultipleValues(Boolean.parseBoolean(value));
                }
            }
            if (getParameter(request, KRADConstants.REFERENCES_TO_REFRESH) != null) {
                setReferencesToRefresh(getParameter(request, KRADConstants.REFERENCES_TO_REFRESH));
            }

            if (getParameter(request, "readOnlyFields") != null) {
                setReadOnlyFields(getParameter(request, "readOnlyFields"));
                setReadOnlyFieldsList(LookupUtils.translateReadOnlyFieldsToList(this.readOnlyFields));
                localLookupable.setReadOnlyFieldsList(getReadOnlyFieldsList());
            }

            /* Show/Hide All Criteria and/or the Workflow Header Bar
             * The default value of each of the following parameters is 'true' in order to always show both the criteria and the header bar.
             * To hide the header bar use the URL parameter 'headerBarEnabled' and set the value to 'false'.
             * To hide all the search criteria (including the buttons) set the URL parameter 'searchCriteriaEnabled' to 'false'.
             */
            setHeaderBarEnabled(Truth.strToBooleanIgnoreCase(getParameter(request, HEADER_BAR_ENABLED_PARAM), Boolean.TRUE));
            setLookupCriteriaEnabled(Truth.strToBooleanIgnoreCase(getParameter(request, SEARCH_CRITERIA_ENABLED_PARAM), Boolean.TRUE));

            // init lookupable with bo class
            localLookupable.setBusinessObjectClass((Class<?
extends BusinessObject>) Class.forName(getBusinessObjectClassName())); Map<String, String> fieldValues = new HashMap<String, String>(); Map<String, String> formFields = getFields(); Class boClass = Class.forName(getBusinessObjectClassName()); for (Iterator iter = localLookupable.getRows().iterator(); iter.hasNext();) { Row row = (Row) iter.next(); for (Iterator iterator = row.getFields().iterator(); iterator.hasNext();) { Field field = (Field) iterator.next(); // check whether form already has value for field if (formFields != null && formFields.containsKey(field.getPropertyName())) { field.setPropertyValue(formFields.get(field.getPropertyName())); } // override values with request if (getParameter(request, field.getPropertyName()) != null) { if(!Field.MULTI_VALUE_FIELD_TYPES.contains(field.getFieldType())) { field.setPropertyValue(getParameter(request, field.getPropertyName()).trim()); } else { //multi value, set to values field.setPropertyValues(getParameterValues(request, field.getPropertyName())); } } field.setPropertyValue(org.kuali.rice.krad.lookup.LookupUtils .forceUppercase(boClass, field.getPropertyName(), field.getPropertyValue())); fieldValues.put(field.getPropertyName(), field.getPropertyValue()); //LOG.info("field name/value added was: " + field.getPropertyName() + field.getPropertyValue()); localLookupable.applyFieldAuthorizationsFromNestedLookups(field); } } if (localLookupable.checkForAdditionalFields(fieldValues)) { for (Iterator iter = localLookupable.getRows().iterator(); iter.hasNext();) { Row row = (Row) iter.next(); for (Iterator iterator = row.getFields().iterator(); iterator.hasNext();) { Field field = (Field) iterator.next(); // check whether form already has value for field if (formFields != null && formFields.containsKey(field.getPropertyName())) { field.setPropertyValue(formFields.get(field.getPropertyName())); } // override values with request if (getParameter(request, field.getPropertyName()) != null) { if(!Field.MULTI_VALUE_FIELD_TYPES.contains(field.getFieldType())) { field.setPropertyValue(getParameter(request, field.getPropertyName()).trim()); } else { //multi value, set to values field.setPropertyValues(getParameterValues(request, field.getPropertyName())); } } fieldValues.put(field.getPropertyName(), field.getPropertyValue()); } } } fieldValues.put(KRADConstants.DOC_FORM_KEY, this.getFormKey()); fieldValues.put(KRADConstants.BACK_LOCATION, this.getBackLocation()); if(this.getDocNum() != null){ fieldValues.put(KRADConstants.DOC_NUM, this.getDocNum()); } if (StringUtils.isNotBlank(getReferencesToRefresh())) { fieldValues.put(KRADConstants.REFERENCES_TO_REFRESH, this.getReferencesToRefresh()); } this.setFields(fieldValues); setFieldConversions(LookupUtils.translateFieldConversions(this.conversionFields)); localLookupable.setFieldConversions(getFieldConversions()); if(StringUtils.isNotEmpty(localLookupable.getExtraButtonSource())) { setExtraButtonSource(localLookupable.getExtraButtonSource()); //also set the boolean so the jsp can use an action button ddExtraButton=true; } if(StringUtils.isNotEmpty(localLookupable.getExtraButtonParams())) { setExtraButtonParams(localLookupable.getExtraButtonParams()); } setLookupable(localLookupable); setFieldsForLookup(fieldValues); // if showMaintenanceLinks is not already true, only show maintenance links if the lookup was called from the portal (or index.html for the generated applications) if (!isShowMaintenanceLinks()) { if (StringUtils.contains(getBackLocation(), "/"+ KRADConstants.PORTAL_ACTION) || 
StringUtils.contains(getBackLocation(), "/index.html")) {
                    showMaintenanceLinks = true;
                }
            }
        } catch (ClassNotFoundException e) {
            LOG.error("Business Object class " + getBusinessObjectClassName() + " not found");
            throw new RuntimeException("Business Object class " + getBusinessObjectClassName() + " not found", e);
        }
    }

    /**
     * @return Returns the lookupableImplServiceName.
     */
    public String getLookupableImplServiceName() {
        return lookupableImplServiceName;
    }

    protected Lookupable getLookupable(String beanName) {
        return KNSServiceLocator.getLookupable(beanName);
    }

    /**
     * @param lookupableImplServiceName The lookupableImplServiceName to set.
     */
    public void setLookupableImplServiceName(String lookupableImplServiceName) {
        this.lookupableImplServiceName = lookupableImplServiceName;
    }

    /**
     * @return Returns the formKey.
     */
    public String getFormKey() {
        return formKey;
    }

    /**
     * @param formKey The formKey to set.
     */
    public void setFormKey(String formKey) {
        this.formKey = formKey;
    }

    /**
     * @return Returns the fields.
     */
    public Map<String, String> getFields() {
        return fields;
    }

    /**
     * @param fields The fields to set.
     */
    public void setFields(Map<String, String> fields) {
        this.fields = fields;
    }

    /**
     * @return Returns the conversionFields.
     */
    public String getConversionFields() {
        return conversionFields;
    }

    /**
     * @param conversionFields The conversionFields to set.
     */
    public void setConversionFields(String conversionFields) {
        this.conversionFields = conversionFields;
    }

    /**
     * @return Returns the fieldConversions.
     */
    public Map<String, String> getFieldConversions() {
        return fieldConversions;
    }

    /**
     * @param fieldConversions The fieldConversions to set.
     */
    public void setFieldConversions(Map<String, String> fieldConversions) {
        this.fieldConversions = fieldConversions;
    }

    /**
     * @return Returns the businessObjectClassName.
     */
    public String getBusinessObjectClassName() {
        return businessObjectClassName;
    }

    /**
     * @param businessObjectClassName The businessObjectClassName to set.
     */
    public void setBusinessObjectClassName(String businessObjectClassName) {
        this.businessObjectClassName = businessObjectClassName;
    }

    /**
     * @return Returns the lookupable.
     */
    public Lookupable getLookupable() {
        return lookupable;
    }

    /**
     * @param lookupable The lookupable to set.
     */
    public void setLookupable(Lookupable lookupable) {
        this.lookupable = lookupable;
    }

    /**
     * @return Returns the hideReturnLink.
     */
    public boolean isHideReturnLink() {
        return hideReturnLink;
    }

    /**
     * @param suppressActions The suppressActions to set.
     */
    public void setSuppressActions(boolean suppressActions) {
        this.suppressActions = suppressActions;
    }

    /**
     * @return Returns the suppressActions.
     */
    public boolean isSuppressActions() {
        return suppressActions;
    }

    /**
     * @param hideReturnLink The hideReturnLink to set.
*/ public void setHideReturnLink(boolean hideReturnLink) { this.hideReturnLink = hideReturnLink; } // TODO: remove these once DD changes have been made public String getExtraButtonParams() { return extraButtons.get(0).getExtraButtonParams(); } // TODO: remove these once DD changes have been made public void setExtraButtonParams(String extraButtonParams) { extraButtons.get(0).setExtraButtonParams( extraButtonParams ); } // TODO: remove these once DD changes have been made public String getExtraButtonSource() { return extraButtons.get(0).getExtraButtonSource(); } // TODO: remove these once DD changes have been made public void setExtraButtonSource(String extraButtonSource) { extraButtons.get(0).setExtraButtonSource( extraButtonSource ); } /** * * @return whether this form returns multiple values */ public boolean isMultipleValues() { return multipleValues; } /** * * @param multipleValues - specify whether this form returns multiple values (i.e. a Collection) */ public void setMultipleValues(boolean multipleValues) { this.multipleValues = multipleValues; } public String getLookupAnchor() { return lookupAnchor; } public void setLookupAnchor(String lookupAnchor) { this.lookupAnchor = lookupAnchor; } /** * Gets the fieldsForLookup attribute. * @return Returns the fieldsForLookup. */ public Map getFieldsForLookup() { return fieldsForLookup; } /** * Sets the fieldsForLookup attribute value. * @param fieldsForLookup The fieldsForLookup to set. */ public void setFieldsForLookup(Map fieldsForLookup) { this.fieldsForLookup = fieldsForLookup; } /** * Gets the readOnlyFields attribute. * @return Returns the readOnlyFields. */ public String getReadOnlyFields() { return readOnlyFields; } /** * Sets the readOnlyFields attribute value. * @param readOnlyFields The readOnlyFields to set. */ public void setReadOnlyFields(String readOnlyFields) { this.readOnlyFields = readOnlyFields; } /** * Gets the readOnlyFieldsList attribute. * @return Returns the readOnlyFieldsList. */ public List getReadOnlyFieldsList() { return readOnlyFieldsList; } /** * Sets the readOnlyFieldsList attribute value. * @param readOnlyFieldsList The readOnlyFieldsList to set. */ public void setReadOnlyFieldsList(List readOnlyFieldsList) { this.readOnlyFieldsList = readOnlyFieldsList; } public String getReferencesToRefresh() { return referencesToRefresh; } public void setReferencesToRefresh(String referencesToRefresh) { this.referencesToRefresh = referencesToRefresh; } public String getPrimaryKeyFieldLabels() { return primaryKeyFieldLabels; } public void setPrimaryKeyFieldLabels(String primaryKeyFieldLabels) { this.primaryKeyFieldLabels = primaryKeyFieldLabels; } public boolean isSearchUsingOnlyPrimaryKeyValues() { return searchUsingOnlyPrimaryKeyValues; } public void setSearchUsingOnlyPrimaryKeyValues(boolean searchUsingOnlyPrimaryKeyValues) { this.searchUsingOnlyPrimaryKeyValues = searchUsingOnlyPrimaryKeyValues; } /** * If true the maintenance links (create new, edit, copy, delete) are displayed on the lookup * * @return showMaintenanceLinks. */ public boolean isShowMaintenanceLinks() { return showMaintenanceLinks; } /** * Sets the showMaintenanceLinks attribute value. * @param showMaintenanceLinks The showMaintenanceLinks to set. 
 */
    public void setShowMaintenanceLinks(boolean showMaintenanceLinks) {
        this.showMaintenanceLinks = showMaintenanceLinks;
    }

    /**
     * Returns whether the results contain at least one row that is returnable
     *
     * @return true if at least one result row is returnable
     */
    public boolean isHasReturnableRow() {
        return this.hasReturnableRow;
    }

    /**
     * Sets whether the results contain at least one row that is returnable
     *
     * @param hasReturnableRow true if at least one result row is returnable
     */
    public void setHasReturnableRow(boolean hasReturnableRow) {
        this.hasReturnableRow = hasReturnableRow;
    }

    /**
     * @return the lookupObjectId
     */
    public String getLookupObjectId() {
        return this.lookupObjectId;
    }

    /**
     * @param lookupObjectId the lookupObjectId to set
     */
    public void setLookupObjectId(String lookupObjectId) {
        this.lookupObjectId = lookupObjectId;
    }

    /**
     * @return the lookupCriteriaEnabled
     */
    public boolean isLookupCriteriaEnabled() {
        return this.lookupCriteriaEnabled;
    }

    /**
     * @param lookupCriteriaEnabled the lookupCriteriaEnabled to set
     */
    public void setLookupCriteriaEnabled(boolean lookupCriteriaEnabled) {
        this.lookupCriteriaEnabled = lookupCriteriaEnabled;
    }

    /**
     * @return the supplementalActionsEnabled
     */
    public boolean isSupplementalActionsEnabled() {
        return this.supplementalActionsEnabled;
    }

    /**
     * @param supplementalActionsEnabled the supplementalActionsEnabled to set
     */
    public void setSupplementalActionsEnabled(boolean supplementalActionsEnabled) {
        this.supplementalActionsEnabled = supplementalActionsEnabled;
    }

    /**
     * @param actionUrlsExist the actionUrlsExist to set
     */
    public void setActionUrlsExist(boolean actionUrlsExist) {
        this.actionUrlsExist = actionUrlsExist;
    }

    /**
     * @return the actionUrlsExist
     */
    public boolean isActionUrlsExist() {
        return actionUrlsExist;
    }

    /**
     * @return the ddExtraButton
     */
    public boolean isDdExtraButton() {
        return this.ddExtraButton;
    }

    /**
     * @param ddExtraButton the ddExtraButton to set
     */
    public void setDdExtraButton(boolean ddExtraButton) {
        this.ddExtraButton = ddExtraButton;
    }

    public boolean isHeaderBarEnabled() {
        return headerBarEnabled;
    }

    public void setHeaderBarEnabled(boolean headerBarEnabled) {
        this.headerBarEnabled = headerBarEnabled;
    }

    public boolean isDisableSearchButtons() {
        return this.disableSearchButtons;
    }

    public void setDisableSearchButtons(boolean disableSearchButtons) {
        this.disableSearchButtons = disableSearchButtons;
    }

    /**
     * Retrieves the String value for the isAdvancedSearch property.
     *
     * <p>
     * The isAdvancedSearch property is also used as a http request parameter. The property name must
     * match <code>KRADConstants.ADVANCED_SEARCH_FIELD</code> for the button setup and javascript toggling of the value
     * to work.
     * </p>
     *
     * @return String "YES" if advanced search set, "NO" or null for basic search.
     */
    public String getIsAdvancedSearch() {
        return this.isAdvancedSearch;
    }

    /**
     * Sets the isAdvancedSearch String value.
     *
     * @param advancedSearch - "YES" for advanced search, "NO" for basic search
     */
    public void setIsAdvancedSearch(String advancedSearch) {
        this.isAdvancedSearch = advancedSearch;
    }

    /**
     * Determines whether the search/clear buttons should be rendered based on the form property
     * and what is configured in the data dictionary for the lookup
     *
     * @return boolean true if the buttons should be rendered, false if they should not be
     */
    public boolean getRenderSearchButtons() {
        boolean renderSearchButtons = true;

        if (disableSearchButtons || KNSServiceLocator.getBusinessObjectDictionaryService().disableSearchButtonsInLookup(
                getLookupable().getBusinessObjectClass())) {
            renderSearchButtons = false;
        }

        return renderSearchButtons;
    }
}
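/*
 * Usage illustration, not part of the original source: populate() reads two
 * boolean URL parameters that both default to true. The query strings below
 * are hypothetical examples of switching them off; only the two parameter
 * names are taken from the code above, the action path and the other
 * parameters are placeholders.
 *
 *   .../kr/lookup.do?businessObjectClassName=...&headerBarEnabled=false
 *   .../kr/lookup.do?businessObjectClassName=...&searchCriteriaEnabled=false
 */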
/**
 * Value object representing a certificate bundle consisting of a private key, the
 * certificate and the issuer certificate. Certificate and keys can be either DER or PEM
 * encoded. RSA and Elliptic Curve keys and certificates can be converted to a
 * {@link KeySpec} or an {@link X509Certificate} object, respectively. Supports creation
 * of {@link #createKeyStore(String) key stores} that contain the key and the certificate
 * chain.
 *
 * @author Mark Paluch
 * @author Alex Bremora
 * @see #getPrivateKeySpec()
 * @see #getX509Certificate()
 * @see #getIssuingCaCertificate()
 * @see PemObject
 */
@JsonIgnoreProperties(ignoreUnknown = true)
public class CertificateBundle extends Certificate {

	private final String privateKey;

	@Nullable
	private final String privateKeyType;

	private final List<String> caChain;

	/**
	 * Create a new {@link CertificateBundle}.
	 * @param serialNumber the serial number.
	 * @param certificate the certificate.
	 * @param issuingCaCertificate the issuing CA certificate.
	 * @param caChain the CA chain.
	 * @param privateKey the private key.
	 * @param privateKeyType the private key type.
	 */
	CertificateBundle(@JsonProperty("serial_number") String serialNumber,
			@JsonProperty("certificate") String certificate,
			@JsonProperty("issuing_ca") String issuingCaCertificate,
			@JsonProperty("ca_chain") List<String> caChain,
			@JsonProperty("private_key") String privateKey,
			@Nullable @JsonProperty("private_key_type") String privateKeyType) {

		super(serialNumber, certificate, issuingCaCertificate);
		this.privateKey = privateKey;
		this.privateKeyType = privateKeyType;
		this.caChain = caChain;
	}

	/**
	 * Create a {@link CertificateBundle} given a private key with certificates and the
	 * serial number.
	 * @param serialNumber must not be empty or {@literal null}.
	 * @param certificate must not be empty or {@literal null}.
	 * @param issuingCaCertificate must not be empty or {@literal null}.
	 * @param privateKey must not be empty or {@literal null}.
	 * @return the {@link CertificateBundle}.
	 */
	public static CertificateBundle of(String serialNumber, String certificate, String issuingCaCertificate,
			String privateKey) {

		Assert.hasText(serialNumber, "Serial number must not be empty");
		Assert.hasText(certificate, "Certificate must not be empty");
		Assert.hasText(issuingCaCertificate, "Issuing CA certificate must not be empty");
		Assert.hasText(privateKey, "Private key must not be empty");

		return new CertificateBundle(serialNumber, certificate, issuingCaCertificate,
				Collections.singletonList(issuingCaCertificate), privateKey, null);
	}

	/**
	 * Create a {@link CertificateBundle} given a private key with certificates and the
	 * serial number.
	 * @param serialNumber must not be empty or {@literal null}.
	 * @param certificate must not be empty or {@literal null}.
	 * @param issuingCaCertificate must not be empty or {@literal null}.
	 * @param privateKey must not be empty or {@literal null}.
	 * @param privateKeyType must not be empty or {@literal null}.
	 * @return the {@link CertificateBundle}
	 * @since 2.4
	 */
	public static CertificateBundle of(String serialNumber, String certificate, String issuingCaCertificate,
			String privateKey, String privateKeyType) {
		// note: the parameter is no longer annotated @Nullable; the Javadoc and the
		// Assert.hasText check below both require a non-null, non-empty value

		Assert.hasText(serialNumber, "Serial number must not be empty");
		Assert.hasText(certificate, "Certificate must not be empty");
		Assert.hasText(issuingCaCertificate, "Issuing CA certificate must not be empty");
		Assert.hasText(privateKey, "Private key must not be empty");
		Assert.hasText(privateKeyType, "Private key type must not be empty");

		return new CertificateBundle(serialNumber, certificate, issuingCaCertificate,
				Collections.singletonList(issuingCaCertificate), privateKey, privateKeyType);
	}

	/**
	 * @return the private key (decrypted form, PEM or DER-encoded)
	 */
	public String getPrivateKey() {
		return this.privateKey;
	}

	/**
	 * @return the private key type, can be {@literal null}.
	 * @since 2.4
	 */
	@Nullable
	public String getPrivateKeyType() {
		return this.privateKeyType;
	}

	/**
	 * @return the required private key type, never {@literal null}.
	 * @since 2.4
	 * @throws IllegalStateException if the private key type is {@literal null}
	 */
	public String getRequiredPrivateKeyType() {

		String privateKeyType = getPrivateKeyType();

		if (privateKeyType == null) {
			throw new IllegalStateException("Private key type is not set");
		}

		return privateKeyType;
	}

	/**
	 * Retrieve the private key as {@link KeySpec}.
	 * @return the private {@link KeySpec}. {@link java.security.KeyFactory} can generate
	 * a {@link java.security.PrivateKey} from this {@link KeySpec}.
	 */
	public KeySpec getPrivateKeySpec() {

		try {
			return getPrivateKey(getPrivateKey(), getRequiredPrivateKeyType());
		}
		catch (IOException | GeneralSecurityException e) {
			throw new VaultException("Cannot create KeySpec from private key", e);
		}
	}

	/**
	 * Create a {@link KeyStore} from this {@link CertificateBundle} containing the
	 * private key and certificate chain.
	 * @param keyAlias the key alias to use.
	 * @return the {@link KeyStore} containing the private key and certificate chain.
	 */
	public KeyStore createKeyStore(String keyAlias) {
		return createKeyStore(keyAlias, false);
	}

	/**
	 * Create a {@link KeyStore} from this {@link CertificateBundle} containing the
	 * private key and certificate chain.
	 * @param keyAlias the key alias to use.
	 * @param includeCaChain whether to include the certificate authority chain instead of
	 * just the issuer certificate.
	 * @return the {@link KeyStore} containing the private key and certificate chain.
	 * @since 2.3.3
	 */
	public KeyStore createKeyStore(String keyAlias, boolean includeCaChain) {

		Assert.hasText(keyAlias, "Key alias must not be empty");

		try {

			List<X509Certificate> certificates = new ArrayList<>();
			certificates.add(getX509Certificate());

			if (includeCaChain) {
				certificates.addAll(getX509IssuerCertificates());
			}
			else {
				certificates.add(getX509IssuerCertificate());
			}

			return KeystoreUtil.createKeyStore(keyAlias, getPrivateKeySpec(),
					certificates.toArray(new X509Certificate[0]));
		}
		catch (GeneralSecurityException | IOException e) {
			throw new VaultException("Cannot create KeyStore", e);
		}
	}

	/**
	 * Retrieve the issuing CA certificates as list of {@link X509Certificate}.
	 * @return the issuing CA {@link X509Certificate}.
* @since 2.3.3 */ public List<X509Certificate> getX509IssuerCertificates() { List<X509Certificate> certificates = new ArrayList<>(); for (String data : caChain) { try { certificates.addAll(getCertificates(data)); } catch (CertificateException e) { throw new VaultException("Cannot create Certificate from issuing CA certificate", e); } } return certificates; } private static KeySpec getPrivateKey(String privateKey, String keyType) throws GeneralSecurityException, IOException { Assert.hasText(privateKey, "Private key must not be empty"); Assert.hasText(keyType, "Private key type must not be empty"); if (PemObject.isPemEncoded(privateKey)) { List<PemObject> pemObjects = PemObject.parse(privateKey); for (PemObject pemObject : pemObjects) { if (pemObject.isPrivateKey()) { return getPrivateKey(pemObject.getContent(), keyType); } } throw new IllegalArgumentException("No private key found in PEM-encoded key spec"); } return getPrivateKey(Base64Utils.decodeFromString(privateKey), keyType); } private static KeySpec getPrivateKey(byte[] privateKey, String keyType) throws GeneralSecurityException, IOException { switch (keyType.toLowerCase(Locale.ROOT)) { case "rsa": return KeyFactories.RSA_PRIVATE.getKey(privateKey); case "ec": return KeyFactories.EC.getKey(privateKey); } throw new IllegalArgumentException( String.format("Key type %s not supported. Supported types are: rsa, ec.", keyType)); } }
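/*
 * A minimal usage sketch, not part of the original source. The
 * CertificateBundleExample class name, the PEM placeholder parameters and the
 * "vault" alias are hypothetical; only CertificateBundle.of(...) and
 * createKeyStore(String, boolean) from the class above are assumed.
 */
class CertificateBundleExample {

	static KeyStore toKeyStore(String serial, String certPem, String issuerPem, String keyPem) {
		CertificateBundle bundle = CertificateBundle.of(serial, certPem, issuerPem, keyPem, "rsa");
		// passing true embeds the full CA chain instead of just the issuer certificate
		return bundle.createKeyStore("vault", true);
	}
}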
// Copyright 2018 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use { crate::client::Client, crate::object::{NewObjectExt, ObjectRef, RequestReceiver}, crate::seat::Seat, anyhow::Error, wayland::{ WlDataDevice, WlDataDeviceManager, WlDataDeviceManagerRequest, WlDataDeviceRequest, WlDataSource, WlDataSourceRequest, }, }; /// An implementation of the wl_data_device_manager global. pub struct DataDeviceManager; impl DataDeviceManager { /// Creates a new `DataDeviceManager`. pub fn new() -> Self { DataDeviceManager } } impl RequestReceiver<WlDataDeviceManager> for DataDeviceManager { fn receive( _this: ObjectRef<Self>, request: WlDataDeviceManagerRequest, client: &mut Client, ) -> Result<(), Error> { match request { WlDataDeviceManagerRequest::CreateDataSource { id } => { id.implement(client, DataSource)?; } WlDataDeviceManagerRequest::GetDataDevice { id, seat } => { id.implement(client, DataDevice(seat.into()))?; } } Ok(()) } } struct DataSource; impl RequestReceiver<WlDataSource> for DataSource { fn receive( this: ObjectRef<Self>, request: WlDataSourceRequest, client: &mut Client, ) -> Result<(), Error> { match request { WlDataSourceRequest::Destroy => { client.delete_id(this.id())?; } WlDataSourceRequest::Offer { .. } => {} WlDataSourceRequest::SetActions { .. } => {} } Ok(()) } } struct DataDevice(ObjectRef<Seat>); impl RequestReceiver<WlDataDevice> for DataDevice { fn receive( this: ObjectRef<Self>, request: WlDataDeviceRequest, client: &mut Client, ) -> Result<(), Error> { match request { WlDataDeviceRequest::Release => { client.delete_id(this.id())?; } WlDataDeviceRequest::StartDrag { .. } => {} WlDataDeviceRequest::SetSelection { .. } => {} } Ok(()) } }
import gzip
import io


# FastqRecord is assumed to be defined elsewhere in this module (a simple
# container for the four FASTQ lines); it is referenced as the default
# record class below.
class FastqReader:
    """Base fastq reader"""

    def __init__(self, fname, record_cls=FastqRecord):
        self.fname = fname
        self.record_cls = record_cls
        self._next = None
        self.fobj = (
            gzip.open(fname, "rb") if fname.endswith(".gz") else open(fname, "rb")
        )
        self.f = io.BufferedReader(self.fobj)

    def __iter__(self):
        return self

    def next(self):
        seqid = self.f.readline().decode("utf-8").rstrip("\r\n")
        sequence = self.f.readline().decode("utf-8").rstrip("\r\n")
        qid = self.f.readline().decode("utf-8").rstrip("\r\n")
        qual = self.f.readline().decode("utf-8").rstrip("\r\n")
        if qual:
            record = self.record_cls(seqid, sequence, qid, qual)
            self._next = record
            return record
        else:
            # a valid FASTQ record always has a non-empty quality line, so an
            # empty read here means the input is exhausted
            self._next = None
            raise StopIteration

    def __next__(self):
        return self.next()

    def close(self):
        """
        Close the reader
        """
        self.fobj.close()
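if __name__ == "__main__":
    # Usage sketch, not part of the original module: "reads.fastq.gz" is a
    # placeholder path. Counting records relies only on the iterator protocol
    # implemented above, not on FastqRecord's attribute names.
    reader = FastqReader("reads.fastq.gz")
    n_records = sum(1 for _ in reader)
    reader.close()
    print(f"{n_records} records")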
/* Unlock any pending access to the resource; subclasses should make any
 * blocking function return as soon as possible. */
static gboolean
cmlv_grpc_sink_unlock (GstBaseSink * sink)
{
  CmlvGrpcSink *grpcsink = CMLV_GRPC_SINK (sink);

  GST_DEBUG_OBJECT (grpcsink, "unlock");

  return TRUE;
}
import { Injectable } from '@angular/core';
import {Maraude} from '../../models/Maraude';
import {Subject} from 'rxjs';
import {apiLMT} from '../../environments/environment';
import {HttpClient} from '@angular/common/http';
import {LoadingService} from './loading-service.service';
import {AlertService} from './alert-service.service';

@Injectable({
  providedIn: 'root'
})
export class MaraudeService {

  private maraudeAPILMTUrl = `${apiLMT.url}/maraudes`;

  maraudesSubject = new Subject<Maraude[]>();
  maraudes: Maraude[] = [];

  constructor(private alertService: AlertService,
              private loadingService: LoadingService,
              private http: HttpClient) {}

  emitMaraudesSubject() {
    if (this.maraudes) {
      this.maraudesSubject.next(this.maraudes);
    }
    this.loadingService.hideLoading();
  }

  getAllMaraudes(): void {
    this.loadingService.showLoading();
    this.maraudes = null;
    this.http.get<any>(this.maraudeAPILMTUrl).subscribe(
      next => {
        if (next) {
          this.maraudes = next._embedded.maraudes;
        }
        console.log(this.maraudes);
        this.emitMaraudesSubject();
      },
      error => {
        this.handleError(error);
      }
    );
  }

  // Create a new Maraude
  addMaraude(maraude: Maraude): void {
    this.loadingService.showLoading();
    this.http.post<Maraude>(this.maraudeAPILMTUrl, maraude).subscribe(
      next => {
        // prepend the persisted entity returned by the API; the previous
        // index assignment used indexOf on the not-yet-stored object, which
        // returned -1, so it has been removed
        this.maraudes.unshift(next);
        this.emitMaraudesSubject();
      },
      error => {
        this.handleError(error);
      }
    );
  }

  handleError(error): void {
    this.loadingService.hideLoading();
    this.alertService.error(error.message);
  }
}
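// Usage sketch, not part of the original source: a consuming component would
// subscribe to maraudesSubject before triggering the load, since the service
// pushes results through the Subject rather than returning them. The
// component name and its fields below are hypothetical; only the service API
// above is assumed.
//
// export class MaraudesListComponent implements OnInit, OnDestroy {
//   maraudes: Maraude[] = [];
//   private sub: Subscription;
//
//   constructor(private maraudeService: MaraudeService) {}
//
//   ngOnInit(): void {
//     this.sub = this.maraudeService.maraudesSubject.subscribe(
//       maraudes => this.maraudes = maraudes
//     );
//     this.maraudeService.getAllMaraudes();
//   }
//
//   ngOnDestroy(): void {
//     this.sub.unsubscribe();
//   }
// }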
#include "CondFormats/Calibration/interface/big.h" #include <iostream> //fill big void big::fill(size_t tVectorSize, size_t thVectorSize, size_t sVectorSize, const std::string& atitle) { for (size_t i = 0; i < tVectorSize; ++i) { big::bigEntry b; b.fill(i, 1.0); tVector_.push_back(b); } for (size_t i = 0; i < thVectorSize; ++i) { big::bigHeader h; h.fill(atitle); thVector_.push_back(h); } for (size_t i = 0; i < sVectorSize; ++i) { big::bigStore s; s.fill(atitle); sVector_.push_back(s); } } //fill bigEntry void big::bigEntry::fill(int r, float seed) { runnum = r; alpha = seed; cotalpha = seed; beta = seed; cotbeta = seed; costrk[0] = seed * 0.1; costrk[1] = seed * 0.2; costrk[2] = seed * 0.3; qavg = seed; symax = seed; dyone = seed; syone = seed; sxmax = seed; dxone = seed; sxone = seed; dytwo = seed; sytwo = seed; dxtwo = seed; sxtwo = seed; qmin = seed; for (int i = 0; i < parIDX::LEN1; ++i) { for (int j = 0; j < parIDX::LEN2; ++j) { for (int k = 0; k < parIDX::LEN3; ++k) { par[parIDX::indexOf(i, j, k)] = seed; } } } for (int i = 0; i < ytempIDX::LEN1; ++i) { for (int j = 0; j < ytempIDX::LEN2; ++j) { ytemp[ytempIDX::indexOf(i, j)] = seed; } } for (int i = 0; i < xtempIDX::LEN1; ++i) { for (int j = 0; j < xtempIDX::LEN2; ++j) { xtemp[xtempIDX::indexOf(i, j)] = seed; } } for (int i = 0; i < avgIDX::LEN1; ++i) { for (int j = 0; j < avgIDX::LEN2; ++j) { for (int k = 0; k < avgIDX::LEN3; ++k) { avg[avgIDX::indexOf(i, j, k)] = seed; } } } for (int i = 0; i < aqflIDX::LEN1; ++i) { for (int j = 0; j < aqflIDX::LEN2; ++j) { for (int k = 0; k < aqflIDX::LEN3; ++k) { aqfl[aqflIDX::indexOf(i, j, k)] = seed; } } } for (int i = 0; i < chi2IDX::LEN1; ++i) { for (int j = 0; j < chi2IDX::LEN2; ++j) { for (int k = 0; k < chi2IDX::LEN3; ++k) { chi2[chi2IDX::indexOf(i, j, k)] = seed; } } } for (int i = 0; i < spareIDX::LEN1; ++i) { for (int j = 0; j < spareIDX::LEN2; ++j) { spare[spareIDX::indexOf(i, j)] = seed; } } } //fill bigHeader void big::bigHeader::fill(const std::string& atitle) { title = std::string("atitle"); ID = 0; NBy = 1; NByx = 2; NBxx = 3; NFy = 4; NFyx = 5; NFxx = 6; vbias = 0.1; temperature = 0.2; fluence = 0.3; qscale = 0.4; s50 = 0.5; templ_version = 1; } //fill bigStore void big::bigStore::fill(const std::string& atitle) { head.fill(atitle); for (int i = 0; i < entbyIDX::LEN1; ++i) { bigEntry b; b.fill(i, 0.5 * i); entby[entbyIDX::indexOf(i)] = b; //or use push_back as prefer } std::cout << "length of entbx 1 " << entbxIDX::LEN1 << std::endl; std::cout << "length of entbx 2 " << entbxIDX::LEN2 << std::endl; std::cout << "total size of entbx " << entbxIDX::SIZE << std::endl; for (int i = 0; i < entbxIDX::LEN1; ++i) { for (int j = 0; j < entbxIDX::LEN2; ++j) { bigEntry c; c.fill(i * j, 0.3 * j); entbx[entbxIDX::indexOf(i, j)] = c; //or use push_back as prefer } } for (int i = 0; i < entfyIDX::LEN1; ++i) { bigEntry f; f.fill(i, 0.4 * i); entfy[entfyIDX::indexOf(i)] = f; //or use push_back as prefer } for (int i = 0; i < entfxIDX::LEN1; ++i) { for (int j = 0; j < entfxIDX::LEN2; ++j) { bigEntry f; f.fill(i * j, 0.25 * j); entfx[entfxIDX::indexOf(i, j)] = f; //or use push_back as prefer } } }