/** * Inserts vertex layout information in a given layout map, based on a * string array description and node map. */ private void putVertexLayout(LayoutMap layoutMap, String[] parts, AttrGraph graph) throws FormatException { AttrNode node = graph.getNode(parts[1]); if (node == null) { throw new FormatException("Unknown node " + parts[1]); } Rectangle bounds = toBounds(parts, 2); if (bounds == null) { throw new FormatException("Bounds for " + parts[1] + " cannot be parsed"); } layoutMap.putNode(node, new JVertexLayout(bounds)); }
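The `toBounds` helper called above is not part of this fragment. A minimal sketch of what it might look like, assuming the bounds are serialized as four consecutive integer fields (x, y, width, height) starting at the given offset; the field layout and the `java.awt.Rectangle` type are assumptions, not something the fragment confirms:

```java
// Hypothetical sketch of the toBounds helper; returns null on any parse
// failure so the caller can raise its FormatException.
private Rectangle toBounds(String[] parts, int start) {
    if (parts.length < start + 4) {
        return null; // not enough fields for x, y, width, height
    }
    try {
        return new Rectangle(
            Integer.parseInt(parts[start]),
            Integer.parseInt(parts[start + 1]),
            Integer.parseInt(parts[start + 2]),
            Integer.parseInt(parts[start + 3]));
    } catch (NumberFormatException e) {
        return null; // caller reports unparsable bounds
    }
}
```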
/* SPDX-License-Identifier: BSD-2-Clause */
/*******************************************************************************
 * Copyright 2018-2019, Fraunhofer SIT sponsored by Infineon Technologies AG
 * All rights reserved.
 ******************************************************************************/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdlib.h>
#include <errno.h>
#include <unistd.h>
#include <string.h>

#include "tss2_fapi.h"
#include "fapi_int.h"
#include "fapi_util.h"
#include "tss2_esys.h"
#include "fapi_crypto.h"
#include "fapi_policy.h"
#include "ifapi_policyutil_execute.h"
#include "ifapi_json_deserialize.h"
#define LOGMODULE fapi
#include "util/log.h"
#include "util/aux_util.h"

/** One-Call function for Fapi_Decrypt
 *
 * Decrypts data that was previously encrypted with Fapi_Encrypt.
 *
 * @param [in, out] context The FAPI_CONTEXT
 * @param [in] cipherText The ciphertext to decrypt
 * @param [out] plainText The decrypted plaintext. May be NULL
 *              (callee-allocated)
 * @param [out] plainTextSize The size of the plaintext in bytes. May be NULL
 *
 * @retval TSS2_RC_SUCCESS: if the function call was a success.
 * @retval TSS2_FAPI_RC_BAD_REFERENCE: if context or cipherText is NULL.
 * @retval TSS2_FAPI_RC_BAD_CONTEXT: if context corruption is detected.
 * @retval TSS2_FAPI_RC_KEY_NOT_FOUND: if the key necessary to decrypt the file
 *         cannot be found.
 * @retval TSS2_FAPI_RC_BAD_KEY: if the decryption key is unsuitable for the
 *         requested operation.
 * @retval TSS2_FAPI_RC_BAD_VALUE: if the decryption fails.
 * @retval TSS2_FAPI_RC_BAD_SEQUENCE: if the context has an asynchronous
 *         operation already pending.
 * @retval TSS2_FAPI_RC_IO_ERROR: if the data cannot be saved.
 * @retval TSS2_FAPI_RC_MEMORY: if the FAPI cannot allocate enough memory for
 *         internal operations or return parameters.
 */
TSS2_RC
Fapi_Decrypt(
    FAPI_CONTEXT *context,
    char   const *cipherText,
    uint8_t     **plainText,
    size_t       *plainTextSize)
{
    LOG_TRACE("called for context:%p", context);

    TSS2_RC r, r2;

    /* Check for NULL parameters */
    check_not_null(context);
    check_not_null(cipherText);

    /* Check whether TCTI and ESYS are initialized */
    return_if_null(context->esys, "Command can't be executed in non-TPM mode.",
                   TSS2_FAPI_RC_NO_TPM);

    /* If the async state automata of FAPI shall be tested, then we must not
       set the timeouts of ESYS to blocking mode. During testing, the mssim
       tcti will ensure multiple re-invocations. Usually however the
       synchronous invocations of FAPI shall instruct ESYS to block until a
       result is available. */
#ifndef TEST_FAPI_ASYNC
    r = Esys_SetTimeout(context->esys, TSS2_TCTI_TIMEOUT_BLOCK);
    return_if_error_reset_state(r, "Set Timeout to blocking");
#endif /* TEST_FAPI_ASYNC */

    r = Fapi_Decrypt_Async(context, cipherText);
    return_if_error_reset_state(r, "Data_Decrypt");

    do {
        /* We wait for file I/O to be ready if the FAPI state automata
           are in a file I/O state. */
        r = ifapi_io_poll(&context->io);
        return_if_error(r, "Something went wrong with IO polling");

        /* Repeatedly call the finish function, until FAPI has transitioned
           through all execution stages / states of this invocation. */
        r = Fapi_Decrypt_Finish(context, plainText, plainTextSize);
    } while ((r & ~TSS2_RC_LAYER_MASK) == TSS2_BASE_RC_TRY_AGAIN);

    /* Reset the ESYS timeout to non-blocking, immediate response.
     */
    r2 = Esys_SetTimeout(context->esys, 0);
    return_if_error(r2, "Set Timeout to non-blocking");

    return_if_error_reset_state(r, "Data_Decrypt");

    LOG_TRACE("finished");
    return TSS2_RC_SUCCESS;
}

/** Asynchronous function for Fapi_Decrypt
 *
 * Decrypts data that was previously encrypted with Fapi_Encrypt.
 *
 * Call Fapi_Decrypt_Finish to finish the execution of this command.
 *
 * @param [in, out] context The FAPI_CONTEXT
 * @param [in] cipherText The ciphertext to decrypt
 *
 * @retval TSS2_RC_SUCCESS: if the function call was a success.
 * @retval TSS2_FAPI_RC_BAD_REFERENCE: if context or cipherText is NULL.
 * @retval TSS2_FAPI_RC_BAD_CONTEXT: if context corruption is detected.
 * @retval TSS2_FAPI_RC_KEY_NOT_FOUND: if the key necessary to decrypt the file
 *         cannot be found.
 * @retval TSS2_FAPI_RC_BAD_KEY: if the decryption key is unsuitable for the
 *         requested operation.
 * @retval TSS2_FAPI_RC_BAD_VALUE: if the decryption fails.
 * @retval TSS2_FAPI_RC_BAD_SEQUENCE: if the context has an asynchronous
 *         operation already pending.
 * @retval TSS2_FAPI_RC_IO_ERROR: if the data cannot be saved.
 * @retval TSS2_FAPI_RC_MEMORY: if the FAPI cannot allocate enough memory for
 *         internal operations or return parameters.
 */
TSS2_RC
Fapi_Decrypt_Async(
    FAPI_CONTEXT *context,
    char   const *cipherText)
{
    LOG_TRACE("called for context:%p", context);
    LOG_TRACE("cipherText: %s", cipherText);

    TSS2_RC r;
    json_object *jso = NULL;

    /* Check for NULL parameters */
    check_not_null(context);
    check_not_null(cipherText);

    /* Helpful alias pointers */
    IFAPI_Data_EncryptDecrypt *command = &(context->cmd.Data_EncryptDecrypt);
    IFAPI_ENCRYPTED_DATA *encData = &command->enc_data;

    r = ifapi_session_init(context);
    return_if_error(r, "Initialize Decrypt");

    encData->cipher.buffer = NULL;
    command->out_data = NULL;
    jso = json_tokener_parse(cipherText);
    return_if_null(jso, "Json error.", TSS2_FAPI_RC_BAD_VALUE);

    command->object_handle = ESYS_TR_NONE;
    r = ifapi_json_IFAPI_ENCRYPTED_DATA_deserialize(jso, encData);
    goto_if_error(r, "Invalid cipher object.", error_cleanup);

    command->in_data = &encData->cipher.buffer[0];
    command->numBytes = encData->cipher.size;

    /* No sub path in keystore will be used (first param == NULL) */
    r = ifapi_get_entities(&context->keystore, NULL, &command->pathlist,
                           &command->numPaths);
    goto_if_error(r, "get entities.", error_cleanup);

    /* Start with the last path */
    command->path_idx = command->numPaths;

    r = ifapi_get_sessions_async(context, IFAPI_SESSION_GENEK | IFAPI_SESSION1,
                                 TPMA_SESSION_DECRYPT, 0);
    goto_if_error_reset_state(r, "Create sessions", error_cleanup);

    json_object_put(jso);
    context->state = DATA_DECRYPT_WAIT_FOR_SESSION;

    LOG_TRACE("finished");
    return r;

error_cleanup:
    if (jso)
        json_object_put(jso);
    SAFE_FREE(encData->cipher.buffer);
    return r;
}

/** Asynchronous finish function for Fapi_Decrypt
 *
 * This function should be called after a previous Fapi_Decrypt.
 *
 * @param [in, out] context The FAPI_CONTEXT
 * @param [out] plainText The decrypted plaintext. May be NULL
 *              (callee-allocated)
 * @param [out] plainTextSize The size of the plaintext in bytes. May be NULL
 *
 * @retval TSS2_RC_SUCCESS: if the function call was a success.
 * @retval TSS2_FAPI_RC_BAD_REFERENCE: if context, plainText or plainTextSize
 *         is NULL.
 * @retval TSS2_FAPI_RC_BAD_CONTEXT: if context corruption is detected.
 * @retval TSS2_FAPI_RC_BAD_SEQUENCE: if the context has an asynchronous
 *         operation already pending.
 * @retval TSS2_FAPI_RC_IO_ERROR: if the data cannot be saved.
 * @retval TSS2_FAPI_RC_MEMORY: if the FAPI cannot allocate enough memory for
 *         internal operations or return parameters.
 * @retval TSS2_FAPI_RC_TRY_AGAIN: if the asynchronous operation is not yet
 *         complete. Call this function again later.
 */
TSS2_RC
Fapi_Decrypt_Finish(
    FAPI_CONTEXT *context,
    uint8_t     **plainText,
    size_t       *plainTextSize)
{
    LOG_TRACE("called for context:%p", context);

    TSS2_RC r;
    char *path;
    UINT32 pathIdx;
    TPM2B_PUBLIC_KEY_RSA *tpmPlainText = NULL;
    TPM2B_SENSITIVE_DATA *sym_key = NULL;

    /* Check for NULL parameters */
    check_not_null(context);

    /* Helpful alias pointers */
    IFAPI_OBJECT *encKeyObject = NULL;
    IFAPI_Data_EncryptDecrypt *command = &(context->cmd.Data_EncryptDecrypt);
    IFAPI_ENCRYPTED_DATA *encData = &command->enc_data;

    switch (context->state) {
        statecase(context->state, DATA_DECRYPT_WAIT_FOR_SESSION);
            r = ifapi_get_sessions_finish(context,
                                          &context->profiles.default_profile);
            return_try_again(r);
            goto_if_error_reset_state(r, " FAPI create session", error_cleanup);

            context->state = DATA_DECRYPT_SEARCH_KEY;
            fallthrough;

        statecase(context->state, DATA_DECRYPT_SEARCH_KEY);
            r = ifapi_keystore_search_obj(&context->keystore, &context->io,
                                          &command->enc_data.key_name, &path);
            return_try_again(r);
            goto_if_error(r, "Search Key", error_cleanup);

            r = ifapi_load_keys_async(context, path);
            goto_if_error(r, "Load keys.", error_cleanup);

            LOG_TRACE("Key found.");
            SAFE_FREE(path);

            context->state = DATA_DECRYPT_WAIT_FOR_KEY;
            fallthrough;

        statecase(context->state, DATA_DECRYPT_WAIT_FOR_KEY);
            r = ifapi_load_keys_finish(context, IFAPI_FLUSH_PARENT,
                                       &command->key_handle,
                                       &command->key_object);
            return_try_again(r);
            goto_if_error_reset_state(r, " Load key.", error_cleanup);

            encKeyObject = command->key_object;

            /* Symmetric decryption */
            if (command->enc_data.type == IFAPI_SYM_BULK_ENCRYPTION &&
                encKeyObject->misc.key.public.publicArea.type == TPM2_ALG_SYMCIPHER) {
                r = Esys_TRSess_SetAttributes(context->esys, context->session1,
                                              TPMA_SESSION_CONTINUESESSION, 0xff);
                goto_if_error_reset_state(r, "Set session attributes.",
                                          error_cleanup);

                r = ifapi_sym_encrypt_decrypt_async(context, command->in_data,
                                                    command->in_dataSize,
                                                    TPM2_YES); /**< decrypt (not encrypt) */
                goto_if_error(r, "Symmetric decryption error.", error_cleanup);

                context->state = DATA_DECRYPT_WAIT_FOR_SYM_ENCRYPTION;
                return TSS2_FAPI_RC_TRY_AGAIN;
            }

            if (command->enc_data.type == IFAPI_ASYM_BULK_ENCRYPTION &&
                encKeyObject->misc.key.public.publicArea.type == TPM2_ALG_RSA) {
                context->state = DATA_DECRYPT_AUTHORIZE_KEY;
                return TSS2_FAPI_RC_TRY_AGAIN;
            } else {
                goto_error(r, TSS2_FAPI_RC_GENERAL_FAILURE, "Invalid mode",
                           error_cleanup);
                break;
            }

        statecase(context->state, DATA_DECRYPT_AUTHORIZE_KEY);
            r = ifapi_authorize_object(context, command->key_object,
                                       &command->auth_session);
            return_try_again(r);
            goto_if_error(r, "Authorize signature key.", error_cleanup);

            TPM2B_PRIVATE private;
            private.size = encData->sym_private.size;
            memcpy(&private.buffer[0], encData->sym_private.buffer, private.size);
            r = Esys_Load_Async(context->esys, command->key_handle,
                                command->auth_session, ESYS_TR_NONE, ESYS_TR_NONE,
                                &private, &encData->sym_public);
            goto_if_error(r, "Load async", error_cleanup);

            context->state = DATA_DECRYPT_WAIT_FOR_SEAL_KEY;
            fallthrough;

        statecase(context->state, DATA_DECRYPT_WAIT_FOR_SEAL_KEY);
            r = Esys_Load_Finish(context->esys, &command->object_handle);
            return_try_again(r);
            goto_if_error(r, "Load_Finish", error_cleanup);

            context->state = DATA_DECRYPT_FLUSH_KEY;
            fallthrough;

        statecase(context->state, DATA_DECRYPT_FLUSH_KEY);
            r = Esys_FlushContext_Async(context->esys, command->key_handle);
            goto_if_error(r, "Error: FlushContext", error_cleanup);

            context->state = DATA_DECRYPT_WAIT_FOR_FLUSH;
            fallthrough;

        statecase(context->state, DATA_DECRYPT_WAIT_FOR_FLUSH);
            r = Esys_FlushContext_Finish(context->esys);
            return_try_again(r);
            goto_if_error(r, "Error: FlushContext", error_cleanup);

            command->key_handle = ESYS_TR_NONE;

            if (!encData->sym_policy_harness.policy) {
                /* Object can be unsealed without authorization */
                context->state = DATA_DECRYPT_UNSEAL_OBJECT;
                return TSS2_FAPI_RC_TRY_AGAIN;
            }

            r = ifapi_policyutil_execute_prepare(context,
                                                 encData->sym_public.publicArea.nameAlg,
                                                 &encData->sym_policy_harness);
            return_if_error(r, "Prepare policy execution.");

            context->policy.util_current_policy =
                context->policy.util_current_policy->prev;
            command->auth_session = ESYS_TR_NONE;
            fallthrough;

        statecase(context->state, DATA_DECRYPT_EXEC_POLICY);
            r = ifapi_policyutil_execute(context, &command->auth_session);
            return_try_again(r);
            goto_if_error(r, "Execute policy", error_cleanup);

            /* Clear continue session flag, so policy session will be flushed
               after authorization */
            r = Esys_TRSess_SetAttributes(context->esys, command->auth_session,
                                          0, TPMA_SESSION_CONTINUESESSION);
            goto_if_error(r, "Esys_TRSess_SetAttributes", error_cleanup);
            fallthrough;

        statecase(context->state, DATA_DECRYPT_UNSEAL_OBJECT);
            r = Esys_Unseal_Async(context->esys, command->object_handle,
                                  command->auth_session, ESYS_TR_NONE,
                                  ESYS_TR_NONE);
            goto_if_error(r, "Error esys Unseal", error_cleanup);

            context->state = DATA_DECRYPT_WAIT_FOR_UNSEAL;
            fallthrough;

        statecase(context->state, DATA_DECRYPT_WAIT_FOR_UNSEAL);
            r = Esys_Unseal_Finish(context->esys, &sym_key);
            return_try_again(r);
            goto_if_error(r, "Unseal_Finish", error_cleanup);

            r = ifapi_crypto_aes_decrypt(&sym_key->buffer[0],
                                         encData->sym_key_size,
                                         &encData->sym_iv.buffer[0],
                                         encData->cipher.buffer,
                                         encData->cipher.size);
            SAFE_FREE(sym_key);
            goto_if_error(r, "AES decryption", error_cleanup);

            if (plainText)
                *plainText = encData->cipher.buffer;
            if (plainTextSize)
                *plainTextSize = encData->cipher.size;

            r = Esys_FlushContext_Async(context->esys, command->object_handle);
            goto_if_error(r, "Error: FlushContext", error_cleanup);

            context->state = DATA_DECRYPT_FLUSH_SYM_OBJECT;
            fallthrough;

        statecase(context->state, DATA_DECRYPT_FLUSH_SYM_OBJECT);
            r = Esys_FlushContext_Finish(context->esys);
            return_try_again(r);
            goto_if_error(r, "Flush object", error_cleanup);

            context->state = DATA_DECRYPT_CLEANUP;
            return TSS2_FAPI_RC_TRY_AGAIN;

        statecase(context->state, DATA_DECRYPT_NULL_AUTH_SENT);
            /* This is used later on to differentiate two cases;
               see below. */
            fallthrough;

        statecase(context->state, DATA_DECRYPT_WAIT_FOR_RSA_DECRYPTION);
            r = Esys_RSA_Decrypt_Finish(context->esys, &tpmPlainText);
            return_try_again(r);

            /* Retry with authorization callback after trial with null auth */
            encKeyObject = command->key_object;
            if (((r & ~TPM2_RC_N_MASK) == TPM2_RC_BAD_AUTH) &&
                (encKeyObject->misc.key.public.publicArea.objectAttributes &
                 TPMA_OBJECT_NODA) &&
                context->state == DATA_DECRYPT_NULL_AUTH_SENT) {
                context->state = DATA_DECRYPT_WAIT_FOR_RSA_DECRYPTION;

                r = ifapi_set_auth(context, encKeyObject, "Decrypt Key");
                goto_if_error_reset_state(r, " Fapi_Decrypt", error_cleanup);

                TPM2B_PUBLIC_KEY_RSA *auxData =
                    (TPM2B_PUBLIC_KEY_RSA *)&context->aux_data;
                TPM2B_DATA null_data = {.size = 0, .buffer = {} };
                const IFAPI_PROFILE *profile;

                pathIdx = command->path_idx;
                path = command->pathlist[pathIdx];

                r = ifapi_profiles_get(&context->profiles, path, &profile);
                goto_if_error(r, "Retrieving profiles data", error_cleanup);

                r = Esys_RSA_Decrypt_Async(context->esys, command->key_handle,
                                           context->session1, ESYS_TR_NONE,
                                           ESYS_TR_NONE, auxData,
                                           &profile->rsa_decrypt_scheme,
                                           &null_data);
                goto_if_error(r, "Error esys rsa decrypt", error_cleanup);

                return TSS2_FAPI_RC_TRY_AGAIN;
            }
            goto_if_error_reset_state(r, "RSA decryption.", error_cleanup);

            if (plainTextSize)
                *plainTextSize = tpmPlainText->size;
            if (plainText) {
                *plainText = malloc(tpmPlainText->size);
                goto_if_null(*plainText, "Out of memory", TSS2_FAPI_RC_MEMORY,
                             error_cleanup);

                memcpy(*plainText, &tpmPlainText->buffer[0], tpmPlainText->size);
                free(tpmPlainText);
            }

            context->state = DATA_DECRYPT_FLUSH_KEY;
            return TSS2_FAPI_RC_TRY_AGAIN;

        statecase(context->state, DATA_DECRYPT_WAIT_FOR_SYM_ENCRYPTION);
            r = ifapi_sym_encrypt_decrypt_finish(context, plainText,
                                                 plainTextSize, TPM2_YES);
            return_try_again(r);
            goto_if_error_reset_state(r, "Symmetric decryption.", error_cleanup);
            fallthrough;

        statecase(context->state, DATA_DECRYPT_CLEANUP);
            r = ifapi_cleanup_session(context);
            try_again_or_error_goto(r, "Cleanup", error_cleanup);
            break;

        statecasedefault(context->state);
    }

    context->state = _FAPI_STATE_INIT;

    /* Cleanup of local objects */
    SAFE_FREE(tpmPlainText);

    /* Cleanup of command related objects */
    ifapi_cleanup_ifapi_object(command->key_object);
    for (size_t i = 0; i < command->numPaths; i++) {
        SAFE_FREE(command->pathlist[i]);
    }
    SAFE_FREE(command->pathlist);

    /* Cleanup of encryption data related objects */
    ifapi_cleanup_policy_harness(&encData->sym_policy_harness);
    SAFE_FREE(encData->sym_private.buffer);

    /* Cleanup of context related objects */
    ifapi_cleanup_ifapi_object(&context->createPrimary.pkey_object);
    ifapi_cleanup_ifapi_object(context->loadKey.key_object);

    LOG_TRACE("finished");
    return TSS2_RC_SUCCESS;

error_cleanup:
    /* Cleanup of local objects */
    SAFE_FREE(tpmPlainText);

    /* Cleanup of command related objects */
    ifapi_cleanup_ifapi_object(command->key_object);
    ifapi_cleanup_policy_harness(&encData->sym_policy_harness);
    for (size_t i = 0; i < command->numPaths; i++) {
        SAFE_FREE(command->pathlist[i]);
    }
    SAFE_FREE(command->pathlist);

    /* Cleanup of encryption data related objects */
    SAFE_FREE(encData->cipher.buffer);
    SAFE_FREE(encData->sym_private.buffer);

    /* Cleanup of context related objects */
    if (command->key_handle != ESYS_TR_NONE)
        Esys_FlushContext(context->esys, command->key_handle);
    ifapi_session_clean(context);
    ifapi_cleanup_ifapi_object(&context->createPrimary.pkey_object);
    ifapi_cleanup_ifapi_object(&context->loadKey.auth_object);
    ifapi_cleanup_ifapi_object(context->loadKey.key_object);

    return r;
}
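For orientation, a minimal caller sketch for the one-call variant above. It assumes a FAPI context already set up elsewhere (e.g. via Fapi_Initialize) and a JSON ciphertext produced earlier by Fapi_Encrypt; only the decrypt call itself is shown, and the callee-allocated buffer is released with Fapi_Free:

```c
/* Caller sketch: decrypt a ciphertext produced by Fapi_Encrypt.
 * The surrounding setup (Fapi_Initialize etc.) is assumed. */
#include <inttypes.h>
#include <stdio.h>
#include "tss2_fapi.h"

static int decrypt_example(FAPI_CONTEXT *context, const char *cipherText)
{
    uint8_t *plainText = NULL;   /* callee-allocated on success */
    size_t plainTextSize = 0;

    TSS2_RC r = Fapi_Decrypt(context, cipherText, &plainText, &plainTextSize);
    if (r != TSS2_RC_SUCCESS) {
        fprintf(stderr, "Fapi_Decrypt failed: 0x%" PRIx32 "\n", r);
        return -1;
    }
    printf("decrypted %zu bytes\n", plainTextSize);
    Fapi_Free(plainText);        /* FAPI output buffers are released with Fapi_Free */
    return 0;
}
```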
// NewCustomClient instantiates a proxybonanza API client with the given key and the given http.Client.
func NewCustomClient(key string, client *http.Client) *APIClient {
	return &APIClient{
		Key:           key,
		KnownPackages: make(map[int]PackageDetails),
		c:             client,
	}
}
#include <cassert>
#include <pathfinder/pathfinder.h>

int main(int argc, char *argv[]) {
    int mapWidth = 500;
    int mapHeight = 500;
    unsigned int mapSize = mapWidth * mapHeight;
    unsigned char* map = new unsigned char[mapSize];
    const unsigned int maxSteps = 900;

    // Only column mapWidth - 3 is marked differently from the rest of the
    // grid, so start and goal are not connected and FindPath should fail.
    for (int y = 0; y < mapHeight; ++y)
        for (int x = 0; x < mapWidth; ++x) {
            map[x + y * mapWidth] = (x == mapWidth - 3) ? 1 : 0;
        }

    pathfinder::Vec2 start(0, mapHeight / 2);
    pathfinder::Vec2 goal(mapWidth - 1, mapHeight / 2);
    int* outBuffer = new int[maxSteps];

    int pathLength = pathfinder::FindPath(start.x, start.y, goal.x, goal.y,
                                          map, mapWidth, mapHeight,
                                          outBuffer, maxSteps);
    assert(pathLength == -1);

    // A second, identical call checks that a failed search leaves the
    // pathfinder in a reusable state.
    pathLength = pathfinder::FindPath(start.x, start.y, goal.x, goal.y,
                                      map, mapWidth, mapHeight,
                                      outBuffer, maxSteps);
    assert(pathLength == -1);

    delete[] outBuffer;
    delete[] map;
    return 0;
}
// SendOk sends a success response to the browser extension in the predefined json format
func SendOk(data interface{}) {
	SendRaw(&okResponse{
		Status:  "ok",
		Version: version.Code,
		Data:    data,
	})
}
#define _GNU_SOURCE /* for get_current_dir_name() */
#include <dirent.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char *argv[])
{
    char *p;
    p = getcwd(NULL, 128);
    printf("current path:%s\n", p);
    free(p);

    chdir("/home");
    p = get_current_dir_name();
    printf("new path:%s\n", p);
    free(p);

    return 0;
}
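Note that get_current_dir_name() is a GNU extension. A portable sketch of the same idea using only POSIX getcwd() with a caller-supplied buffer:

```c
/* Portable variant: POSIX getcwd() into a fixed buffer,
 * no GNU-only helpers required. */
#include <limits.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    char buf[PATH_MAX];

    if (getcwd(buf, sizeof buf) != NULL)
        printf("current path:%s\n", buf);

    if (chdir("/home") == 0 && getcwd(buf, sizeof buf) != NULL)
        printf("new path:%s\n", buf);

    return 0;
}
```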
// CodeSigningPolicy_Values returns all elements of the CodeSigningPolicy enum
func CodeSigningPolicy_Values() []string {
	return []string{
		CodeSigningPolicyWarn,
		CodeSigningPolicyEnforce,
	}
}
/** * PUT /users : Updates an existing User. * * @param userData the user to update * @throws UserAccountException when the login already exists. * @return the ResponseEntity with status 200 (OK) and with body the updated user, * or with status 400 (Bad Request) if the login or email is already in use, * or with status 500 (Internal Server Error) if the user couldn't be updated */ @SecuredByAuthority({AuthorityConstants.PODIUM_ADMIN, AuthorityConstants.BBMRI_ADMIN}) @PutMapping("/users") public ResponseEntity<ManagedUserRepresentation> updateUser(@Valid @RequestBody UserRepresentation userData) throws UserAccountException { log.debug("REST request to update User : {}", userData); userService.updateUser(userData); Optional<ManagedUserRepresentation> userOptional = userService.getUserByUuid(userData.getUuid()); if (userOptional.isPresent()) { return ResponseEntity.ok(userOptional.get()); } throw new ResourceNotFound("User not found."); }
Remember how great mood rings were in their pre-millennium heyday? BMW has reportedly taken that idea and pushed it into the 21st century to keep up with the ever-changing auto industry.

As part of BMW’s centennial celebration last year, the German company proposed the MINI VISION NEXT 100 – a car of the future that would be the ultimate personalized vehicle but still designed for sharing. Now, it appears BMW and MINI have successfully developed a car that changes colors based on the driver’s mood, according to Bloomberg, which reported that BMW has a version of the mood MINI at a test track.

Per Bloomberg: In the corners, BMW shows off the vehicles it sees as key to its future: a self-driving iNext with a retractable steering wheel; a Rolls-Royce whose roof and sides swing open to allow a comfortable exit; a Mini designed for sharing that changes colors to suit the driver’s mood.

According to BMW’s website description of the MINI VISION NEXT 100, the car “recognises its users, which allows it to offer personalised mobility.” And based on how the driver is feeling or what he or she wants to look like behind the wheel, “(t)he use of colour on the exterior and the projections onto the exterior panels can change” as well.

If the car makes it out of the test track, this could be the craziest and most stylish method of ride sharing.
/**
 * \file history.c
 * \brief All functions on history
 * \author <NAME>.
 * \version 1.0
 * \date February 22, 2018
 *
 * File containing all the history functions
 */

#include <stdio.h>
#include <stdbool.h>
#include <string.h>
#include <stdlib.h>
#include "../include/typedef.h"
#include "../include/check.h"
#include "../include/reg.h"
#include "../include/history.h"
#include "../include/define.h"

int historyResearch;
FILE *fpHistory = NULL;
int commandNumber = -1;

void manageHistory(Node *node) {
    if(strcmp(node->action->arguments, "") == 0)
        printHistory("", -1, NULL);
    else {
        const char *searchRegex = "(^(-s|--search)\\s[[:alnum:]]+)";
        char *historyCommand = NULL;
        if(checkRegex(searchRegex, &historyCommand, node->action->arguments)) {
            //Remove "-s " or "--search "
            while(*historyCommand != ' '){
                historyCommand++;
            }
            historyCommand++;
            printHistory(historyCommand, -1, NULL);
        }
    }
}

void printHistory(char *search, int commandNumber, char **command) {
    FILE *file;
    CHECK((file = fopen("/tmp/shellterHistory", "r")) != NULL);
    if(file) {
        char *line = NULL;
        size_t len = 0;
        ssize_t read;
        int i = 0;
        while ((read = getline(&line, &len, file)) != -1) {
            if(strlen(search) > 0) {
                if(strstr(line, search)) {
                    printf("%s", line);
                }
            } else if(commandNumber != -1) {
                if(i == commandNumber) {
                    strcpy((*command), extractHistoryCommand(line));
                    break;
                }
            } else {
                printf("%s", line);
            }
            i++;
        }
        fclose(file);
    }
}

char* extractHistoryCommand(char *command) {
    while(*command != '\t'){ // Remove number
        command++;
    }
    command++; // remove tabulation
    command[strlen(command) - 1] = '\0'; // Replace \n with end string
    return command;
}

void readHistory(int way, char **str, int commandNumber) {
    if(way == 0 && historyResearch > 0)
        historyResearch--;
    else if(way == 1 && historyResearch < commandNumber)
        historyResearch++;
    printHistory("", historyResearch, str);
}

void resetHistoryCounter(int commandNumber) {
    historyResearch = commandNumber;
}

void retrieveCommandNumber() {
    FILE *fpHistory;
    CHECK((fpHistory = fopen("/tmp/shellterHistory", "a+")) != NULL);
    fseek(fpHistory, 0, SEEK_END);
    if(ftell(fpHistory) > 0)
        commandNumber = getCmdNum(fpHistory);
    else
        commandNumber = 1;
    fclose(fpHistory);
}

int getCmdNum(FILE *fpHistory) {
    int c;
    int i = -2;
    fseek(fpHistory, i, SEEK_END);
    while((c = fgetc(fpHistory)) != '\n') {
        i--;
        fseek(fpHistory, i, SEEK_END);
    }
    char *nbStr = malloc(BUFFER_SIZE * sizeof(char));
    memset(nbStr, 0, BUFFER_SIZE * sizeof(char));
    int l = 0;
    while((c = fgetc(fpHistory)) <= '9' && c >= '0' && c != EOF) {
        i++;
        fseek(fpHistory, i, SEEK_SET);
        nbStr[l] = (char)c;
        l++;
    }
    nbStr[l] = '\0';
    int number;
    if (sscanf(nbStr, "%d", &number) == 1) {
        return number + 1;
    }
    return 1;
}

void writeToFile(char* command){
    CHECK((fpHistory = fopen("/tmp/shellterHistory", "a")) != NULL);
    fprintf(fpHistory, "%d\t%s\n", commandNumber, command);
    commandNumber++;
    fclose(fpHistory);
}
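As writeToFile shows, each history record is stored as one "<number>\t<command>\n" line, which is exactly what extractHistoryCommand strips back off. A tiny illustration with hypothetical values:

```c
/* Record format illustration; the values are hypothetical. */
char line[] = "42\tls -la\n";
char *cmd = extractHistoryCommand(line); /* -> "ls -la": number, tab and newline removed */
```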
/** * Compiler message listener. */ @SuppressWarnings("serial") private static class ErrorListener extends ArrayList<Diagnostic<? extends JavaFileObject>> implements DiagnosticListener<JavaFileObject> { /** * {@inheritDoc} */ @Override public void report(Diagnostic<? extends JavaFileObject> diagnostic) { add(diagnostic); } }
The union representing registered nurses in Newfoundland and Labrador is fired up and speaking out following controversial comments from the deputy minister of health. Union president Debbie Forward is accusing the government of bad-faith bargaining and outright betrayal of the more than 5,500 RNs in the province. "Our members today are feeling like they've been slapped in the face by this government. And quite frankly they're angry and I'm angry at the comments that were made," Forward said in an interview.

Nurses not productive

Meanwhile, deputy health minister John Abbott, the man who caused the furor by saying things like, "We don't have as productive of a nursing workforce as we should," is softening his position. In a statement released Friday, Abbott said his comments were not intended to reflect government policy. "As such, I apologize for any heightened concern my remarks may have caused," he wrote.

John Abbott is Newfoundland and Labrador's deputy minister of Health and Community Services. (Sherry Vivian/CBC)

It all stems from a population symposium at Memorial University recently, in which Abbott was a panellist. He made a series of strong comments about the challenges with health care in the province, and later agreed to an interview with CBC News. In very candid terms, Abbott suggested there are too many nurses and doctors in the province, that every aspect of the system will undergo significant change in the coming years, and that health care, at $3 billion annually, was simply unaffordable. "We have put in a lot of services and facilities that we no longer need and no longer can afford," he said.

Replacing RNs with LPNs

When asked about a new campaign by the nurses union to lobby for more RNs, Abbott said he's reached a different conclusion. He said the province already has far more nurses than the national average, and that sick leave rates were unacceptably high. "I think to say we need more nurses in light of that [I] would suggest probably isn't the answer," he said, adding that he believes there are opportunities to replace RNs with licensed practical nurses, who have less training and are paid less.

It's rare that a deputy minister would agree to an interview, and Abbott's candour raised eyebrows, especially with the nurses' union. "Essentially this government has told registered nurses that they're not working hard enough. That they really don't have a workload issue. That there are too many of them in this province. That they're going to be replaced by lesser-trained workers in the future," said Forward.

Delicate bargaining process

The tension comes at a delicate time, with the province and the nurses union engaged in collective bargaining. Forward said the reaction from her members was swift and widespread. "My members are ringing my phone off the hook. I'm getting email after email saying, 'What is this government up to? How can we trust them? How can we trust that they're going to do anything positive for health care?'" she said.

Forward said it appears government doesn't understand the challenging circumstances faced by RNs. She said nurses are struggling to provide good quality care because of a shortage of staff, which is resulting in difficult working conditions and heightened stress levels. This, she said, is contributing to the high rates of sick leave.
"If they could go to work with reasonable workloads, then they probably wouldn't need to call in sick tomorrow because they can't face another shift like the shift they worked today," Forward said. Twenty-four-hour shifts are still occurring, she added, and there were more than 1,200 shifts last year where a nurse worked more than 16 hours. Forward said the staffing shortage is so acute that it's common for nurses to be denied a day of leave, even if the request is made six months in advance. She said there's a desperate need for more nurses in order to reduce sick-leave rates, improve care and actually save money for the government. "We've given them all that information, and yet in the face of all that, they have the audacity to come out and say we have too many of you. Really? They're not listening to people who are on the ground," Forward said. As for Newfoundland and Labrador's larger than average nursing workforce, Forward said that's a consequence of geography, and a largely unhealthy population.
NBC is reportedly producing a series that will take place in Baltimore and will feature a Baltimore native as one of the show’s executive producers, according to Variety.

The Los Angeles-based entertainment magazine reported that the network is developing a drama following a newly elected African-American female mayor, a female chief of police and a district attorney “as they work for the good of one of America’s most dangerous cities.”

Penny Johnson Jerald, a Baltimore actress who has been featured in “The Larry Sanders Show” and “Star Trek: Deep Space Nine,” will be on the team of executive producers, which also includes Allison Abner, who wrote for Netflix’s “Narcos” and “West Wing,” Variety wrote.

Since only a dozen black women have served as mayor of a major American city, it’s likely that the potential series has played off Baltimore’s political history, which has included three female African-American mayors in a row, including Sheila Dixon, Stephanie Rawlings-Blake and now Catherine E. Pugh. Read more about Baltimore’s black female mayors here.

[email protected] twitter.com/brittanybritto
"""
@author: oxhagolli
Game objects stuff
"""
import abc

import pygame


class GameObject(abc.ABC):
    def __init__(self, source, screen):
        self.image = pygame.image.load(source)
        self.screen = screen

    @abc.abstractmethod
    def render(self):
        pass


class VisibleObject(GameObject):
    def __init__(self, source, screen, x=0, y=0, theta=0):
        super().__init__(source, screen)
        self.x = x
        self.y = y
        self.theta = theta

    def render(self):
        rotate = pygame.transform.rotate(self.image, self.theta)
        self.screen.blit(rotate, (self.x, self.y))


class Background(GameObject):
    def __init__(self, source, screen):
        # A str source is loaded from disk; anything else (a ready Surface
        # or a fill color) is stored as-is.
        if isinstance(source, str):
            super().__init__(source, screen)
        else:
            self.image = source
            self.screen = screen

    def render(self):
        if isinstance(self.image, pygame.Surface):
            self.screen.blit(self.image, (0, 0))
        else:
            self.screen.fill(self.image)
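A usage sketch of the Background variants; the asset path, window size, and color below are hypothetical:

```python
# Usage sketch: Background accepts an image path (str), a ready-made
# Surface, or a fill color; render() picks blit vs. fill accordingly.
pygame.init()
screen = pygame.display.set_mode((800, 600))

bg_from_file = Background("assets/sky.png", screen)  # hypothetical asset, loaded via pygame.image.load
bg_from_color = Background((30, 30, 60), screen)     # render() falls through to screen.fill

bg_from_color.render()
pygame.display.flip()
```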
/** * Creates a customized error message based upon the attributes set in the jsf-file. * * @param parseException * ParseException * @param component * the associated UIComponent */ private void createErrorMessage(ParseException parseException, UIComponent component) { if (logger.isDebugEnabled()) { logger.debug("Parsing failed and a error message will be returned to the user."); } String fieldName = ((String) component.getAttributes().get(FIELD_NAME) == null) ? "" : (String) component .getAttributes().get(FIELD_NAME); String errorMsg = getStringAttribute(component, ERROR_MSG); String localePattern = getStringAttribute(component, LOCALE_PATTERN); if (localePattern == null || "".equalsIgnoreCase(localePattern)) { String[] dateFormatArray = getDateFormatArray(getStringAttribute(component, PATTERN)); localePattern = dateFormatArray[0]; if (logger.isDebugEnabled()) { logger.debug("The localePattern is '" + localePattern + "' and is chosen from the first element in the array of legal date formats."); } } List<String> argMessages = new ArrayList<String>(); String[] arguments = new String[] { fieldName, localePattern }; /* Gets all of the arguments from the resource bundle. */ for (String argument : arguments) { if (StringUtils.isBlank(argument)) { argMessages.add(argument.toString()); } else { String msg = MessageContextUtil.getMessage(argument.toString(), null); argMessages.add(msg); } } String msg = MessageContextUtil.getMessage(errorMsg, argMessages.toArray()); throw new ConverterException(new FacesMessage(msg, msg), parseException); }
/**
 * Functional tests that test the batch ingest feature flag while the feature is on.
 */
@RunWith(VertxUnitRunner.class)
public class BatchIngestFfOnT extends BaseFesterFfT {

    /* Our feature flag test logger */
    private static final Logger LOGGER = LoggerFactory.getLogger(BatchIngestFfOnT.class, Constants.MESSAGES);

    /**
     * Tests that the POST collections endpoint returns a 201 on success.
     *
     * @param aContext A testing context
     */
    @Test
    public final void testCsvPostEndpoint(final TestContext aContext) {
        final Async asyncTask = aContext.async();

        // Create our bucket so we have some place to put the manifests
        myS3Client.createBucket(BUCKET);

        myWebClient.post(FESTER_PORT, Constants.UNSPECIFIED_HOST, Constants.POST_CSV_ROUTE)
                .sendMultipartForm(CSV_UPLOAD_FORM, request -> {
                    if (request.succeeded()) {
                        // Check that we get a 201 response code
                        aContext.assertEquals(HTTP.CREATED, request.result().statusCode());
                        TestUtils.complete(asyncTask);
                    } else {
                        aContext.fail(request.cause());
                    }
                });
    }

    /**
     * Tests that the CSV upload form is served while the feature is on.
     *
     * @param aContext A testing context
     */
    @Test
    public final void testCsvUploadPage(final TestContext aContext) {
        final Async asyncTask = aContext.async();

        myWebClient.get(FESTER_PORT, Constants.UNSPECIFIED_HOST, UPLOAD_FORM_PATH).send(request -> {
            if (request.succeeded()) {
                final String html = request.result().bodyAsString();
                final Element h1 = Jsoup.parse(html).selectFirst("h1");

                // Check that we get the expected CSV upload page
                if (h1 != null) {
                    aContext.assertEquals("CSV Upload", h1.text());
                } else {
                    aContext.fail(LOGGER.getMessage(MessageCodes.MFS_087));
                }

                TestUtils.complete(asyncTask);
            } else {
                aContext.fail(request.cause());
            }
        });
    }
}
/**
 * Provides Intent to setting activity for the specified autofill service.
 */
static final class AutofillSettingIntentProvider implements SettingIntentProvider {

    private final String mSelectedKey;
    private final PackageManager mPackageManager;

    public AutofillSettingIntentProvider(PackageManager packageManager, String key) {
        mSelectedKey = key;
        mPackageManager = packageManager;
    }

    @Override
    public Intent getIntent() {
        final List<ResolveInfo> resolveInfos = mPackageManager.queryIntentServices(
                AUTOFILL_PROBE, PackageManager.GET_META_DATA);

        for (ResolveInfo resolveInfo : resolveInfos) {
            final ServiceInfo serviceInfo = resolveInfo.serviceInfo;
            final String flattenKey = new ComponentName(
                    serviceInfo.packageName, serviceInfo.name).flattenToString();
            if (TextUtils.equals(mSelectedKey, flattenKey)) {
                final String settingsActivity;
                try {
                    settingsActivity = new AutofillServiceInfo(mPackageManager, serviceInfo)
                            .getSettingsActivity();
                } catch (SecurityException e) {
                    // Service does not declare the proper permission, ignore it.
                    Log.w(TAG, "Error getting info for " + serviceInfo + ": " + e);
                    return null;
                }
                if (TextUtils.isEmpty(settingsActivity)) {
                    return null;
                }
                return new Intent(Intent.ACTION_MAIN).setComponent(
                        new ComponentName(serviceInfo.packageName, settingsActivity));
            }
        }
        return null;
    }
}
/** * Test that when the ID is null and the name is not null, the id is * eventually set to be the same as the name and the type can be detected. */ @Test public void testDefinitionWithNullID() { final String id1 = "<Definition>\n <ID />\n <Name>CRIS3 v1.s3d</Name>\n</Definition>"; final Object o1 = this.xstream.fromXML(id1); final Definition cp = (Definition) o1; assertEquals(cp.getId(), "CRIS3 v1.s3d"); assertEquals(cp.getType(), DefinitionType.EDIMessageDefinition); }
export type TextType = 'span' | 'paragraph'
import unittest

import application


class BasicTestCase(unittest.TestCase):

    def setUp(self):
        # One test client shared by all the route checks below.
        self.app = application.app.test_client()

    def test_login(self):
        ans = self.app.get('/login')
        self.assertEqual(ans.status_code, 200)

    def test_home(self):
        ans = self.app.get('/home')
        self.assertEqual(ans.status_code, 302)

    def test_forgotPassword(self):
        ans = self.app.get('/forgotPassword')
        self.assertEqual(ans.status_code, 302)

    def test_dashboard(self):
        ans = self.app.get('/dashboard')
        self.assertEqual(ans.status_code, 200)

    def test_about(self):
        ans = self.app.get('/about')
        self.assertEqual(ans.status_code, 200)

    def test_register(self):
        ans = self.app.get('/register')
        self.assertEqual(ans.status_code, 200)

    def test_task(self):
        ans = self.app.get('/task')
        self.assertEqual(ans.status_code, 302)

    def test_editTask(self):
        ans = self.app.get('/editTask')
        self.assertEqual(ans.status_code, 200)

    def test_updateTask(self):
        ans = self.app.get('/updateTask')
        self.assertEqual(ans.status_code, 302)

    def test_logout(self):
        ans = self.app.get('/logout')
        self.assertEqual(ans.status_code, 200)

    def test_dummy(self):
        ans = self.app.get('/dummy')
        self.assertEqual(ans.status_code, 200)

    def test_delete(self):
        ans = self.app.get('/deleteTask')
        self.assertEqual(ans.status_code, 200)


if __name__ == '__main__':
    unittest.main()
module ProjectEuler.Problem1.Problem1 where

solList :: [Int]
solList = filter (\n -> (rem n 5 == 0) || (rem n 3 == 0)) [1..999]

main :: IO ()
main = do
  print $ sum solList
#include <algorithm>
#include <iostream>
#include <limits.h>
#include <stdlib.h>
#include <string>
#include <vector>
#define el endl
#define fd fixed
#define INF INT_MAX/2-1
using namespace std;

int main() {
    int n, a, b, c, x, tmp, frame;
    while (cin >> n >> a >> b >> c >> x, n|a|b|c|x) {
        vector<int> y;
        frame = 0;
        for (int i = 0; i < n; i++) {
            cin >> tmp;
            y.push_back(tmp);
        }
        if (y[0] == x)
            y.erase(y.begin());
        while (!y.empty() && frame <= 10000) {
            frame++;
            x = (a*x+b)%c;
            if (x == y[0])
                y.erase(y.begin());
        }
        if (frame == 10001)
            cout << "-1" << el;
        else
            cout << frame << el;
    }
}
import torch

BUFFER_SIZE = int(1e5)  # replay buffer size
BATCH_SIZE = 192        # minibatch size
GAMMA = 0.99            # discount factor
TAU = 1e-3              # for soft update of target parameters
LR_ACTOR = 1.5e-4       # learning rate of the actor
LR_CRITIC = 1.5e-4      # learning rate of the critic
WEIGHT_DECAY = 0.0001   # L2 weight decay

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
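These read like DDPG-style agent hyperparameters; TAU is the coefficient of the usual soft target update, theta_target = tau * theta_local + (1 - tau) * theta_target. A sketch of that update under this assumption (the model arguments are hypothetical):

```python
# Soft update sketch: blend local network weights into the target network.
def soft_update(local_model, target_model, tau=TAU):
    """theta_target = tau * theta_local + (1 - tau) * theta_target"""
    for target_param, local_param in zip(target_model.parameters(),
                                         local_model.parameters()):
        target_param.data.copy_(tau * local_param.data
                                + (1.0 - tau) * target_param.data)
```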
package seedu.restaurant.testutil;

import seedu.restaurant.model.RestaurantBook;
import seedu.restaurant.model.account.Account;
import seedu.restaurant.model.ingredient.Ingredient;
import seedu.restaurant.model.menu.Item;
import seedu.restaurant.model.reservation.Reservation;
import seedu.restaurant.model.sales.SalesRecord;

/**
 * A utility class to help with building RestaurantBook objects.
 * Example usage: {@code RestaurantBook ab = new RestaurantBookBuilder().withAccount(account).build();}
 */
public class RestaurantBookBuilder {

    private RestaurantBook restaurantBook;

    public RestaurantBookBuilder() {
        restaurantBook = new RestaurantBook();
    }

    public RestaurantBookBuilder(RestaurantBook restaurantBook) {
        this.restaurantBook = restaurantBook;
    }

    //@@author HyperionNKJ
    /**
     * Adds a new {@code Record} to the {@code RestaurantBook} that we are building.
     */
    public RestaurantBookBuilder withRecord(SalesRecord record) {
        restaurantBook.addRecord(record);
        return this;
    }

    /**
     * Adds a new {@code Account} to the {@code RestaurantBook} that we are building.
     */
    public RestaurantBookBuilder withAccount(Account account) {
        restaurantBook.addAccount(account);
        return this;
    }

    //@@author yican95
    /**
     * Adds a new {@code Item} to the {@code RestaurantBook} that we are building.
     */
    public RestaurantBookBuilder withItem(Item item) {
        restaurantBook.addItem(item);
        return this;
    }

    //@@author m4dkip
    /**
     * Adds a new {@code Reservation} to the {@code RestaurantBook} that we are building.
     */
    public RestaurantBookBuilder withReservation(Reservation reservation) {
        restaurantBook.addReservation(reservation);
        return this;
    }

    /**
     * Adds a new {@code Ingredient} to the {@code RestaurantBook} that we are building.
     */
    public RestaurantBookBuilder withIngredient(Ingredient ingredient) {
        restaurantBook.addIngredient(ingredient);
        return this;
    }

    public RestaurantBook build() {
        return restaurantBook;
    }
}
def clean_spaces(m): return m.group(0).replace(" ", "=20")
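clean_spaces has the shape of a re.sub callback: it receives a match object and re-encodes the spaces inside the matched span as "=20" (quoted-printable style). A usage sketch; the pattern and input are hypothetical:

```python
import re

# Hypothetical usage: encode spaces only inside quoted substrings.
text = 'name="John Ronald Reuel Tolkien"'
encoded = re.sub(r'"[^"]*"', clean_spaces, text)
# -> 'name="John=20Ronald=20Reuel=20Tolkien"'
```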
import { IPMACAction } from './action.interface'
import { PostmanCollection } from '../api/types/collection.types'
import { PMACWorkspace } from '../../file-system/types'
import { PMACMap, TfsWorkspaceManager, TfsWorkspaceResourceManager } from '../../file-system'
import { WorkspaceResource } from '../api/types'

export class PMACCollectionCreateAction implements IPMACAction<void> {
  constructor(
    private readonly pmacWorkspace: PMACWorkspace,
    private readonly pmCollection: PostmanCollection,
    private readonly fsWorkspaceManager: TfsWorkspaceManager,
    private readonly fsWorkspaceResourceManager: TfsWorkspaceResourceManager,
    // Optional, only when created with push
    private readonly pmCollectionUid?: string,
  ) {}

  async run() {
    if (!this.pmacWorkspace.pmacID) {
      throw new Error('pmac workspace id not found')
    }

    const pmacID = PMACMap.generatePMACuuid()
    const pmIDTmp = PMACMap.generateTemporaryPMACuuid()

    await this.fsWorkspaceResourceManager.writeWorkspaceResourceDataJson<WorkspaceResource.Collection>({
      name: this.pmCollection.info.name,
      type: WorkspaceResource.Collection,
      pmacID,
      pmIDTmp,
      workspaceName: this.pmacWorkspace.name,
      workspacePMACId: this.pmacWorkspace.pmacID,
      workspaceType: this.pmacWorkspace.type,
      pmID: this.pmCollection.info._postman_id,
      pmUID: this.pmCollectionUid || '',
    }, this.pmCollection)

    this.pmacWorkspace.collections.push({
      pmacID,
      pmID: this.pmCollection.info._postman_id,
      pmIDTmp,
      pmUID: '',
    })

    await this.fsWorkspaceManager.writeWorkspaceDataJson(this.pmacWorkspace)
  }
}
package com.utils;

import com.testbase.BaseClass;
import org.openqa.selenium.WebElement;
import org.openqa.selenium.support.ui.ExpectedConditions;
import org.openqa.selenium.support.ui.WebDriverWait;

import java.util.Set;

public class CommonMethods extends BaseClass {

    /**
     * Method creates WebDriverWait object
     *
     * @return WebDriverWait
     */
    public static WebDriverWait getWait() {
        return new WebDriverWait(driver, 15);
    }

    /**
     * Explicit wait for element clickability
     *
     * @param element
     * @return WebElement
     */
    public static WebElement waitForClickability(WebElement element) {
        return getWait().until(ExpectedConditions.elementToBeClickable(element));
    }

    /**
     * Method clicks on element
     *
     * @param element
     */
    public static void click(WebElement element) {
        waitForClickability(element).click();
    }

    /**
     * Explicit wait for element visibility
     *
     * @param element
     * @return WebElement
     */
    public static WebElement waitForVisibility(WebElement element) {
        return getWait().until(ExpectedConditions.visibilityOf(element));
    }

    /**
     * Method sends text to the text box
     *
     * @param element
     * @param text
     */
    public static void sendText(WebElement element, String text) {
        waitForVisibility(element).clear();
        element.sendKeys(text);
    }

    /**
     * Method switches focus to child window
     */
    public static void switchToChildWindow() {
        String mainWindow = driver.getWindowHandle();
        Set<String> windows = driver.getWindowHandles();
        for (String window : windows) {
            if (!window.equals(mainWindow)) {
                driver.switchTo().window(window);
                break;
            }
        }
    }
}
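A usage sketch from a test that extends BaseClass; the locators below are hypothetical and org.openqa.selenium.By is assumed to be imported:

```java
// Hypothetical test step: the element IDs are placeholders.
WebElement username = driver.findElement(By.id("username"));
WebElement loginButton = driver.findElement(By.id("login"));

CommonMethods.sendText(username, "demo");  // waits for visibility, clears, types
CommonMethods.click(loginButton);          // waits for clickability, then clicks
```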
Rajon Rondo and DeMarcus Cousins, two of the surliest dudes in the NBA, teamed up to pull off a remarkable act of asshole behavior during the final seconds of last night’s game against the Wizards. With six seconds left on the clock and the Kings up by 14, Rondo improperly inbounded the ball. Referee Marc Davis could have just ignored the violation and let the clock run out, but he chose to make the proper call, thus extending the game a few more seconds. Rondo and Cousins found this to be unacceptable, and hit Davis with the “YEAH, NICE JOB, BUDDY!” sarcastic clap: Rondo was ejected from the game, and Cousins earned his 16th technical of the season, which means he will have to miss the Kings’ next game. Nice going, fellas. *SARCASTIC CLAPPING AT MY COMPUTER* Photo via AP
import java.io.*; public class B{ public static void main(String[] args)throws Exception { BufferedReader br = new BufferedReader(new InputStreamReader(System.in)); BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(System.out)); br.readLine(); String[] num=br.readLine().split(" "); String[] st=br.readLine().split(" "); int s1=Integer.parseInt(st[0])-1; int tmp=Integer.parseInt(st[1])-1; int s2=s1>tmp?s1:tmp; s1=s1<tmp?s1:tmp; int sum=0; int sum1=0; for (int i = 0; i < num.length; i++) { int t=Integer.parseInt(num[i]); if(i>=s1&&i<s2){ sum1+=t; } sum+=t; } sum-=sum1; bw.write((sum<sum1?sum:sum1)+"\n"); bw.close(); br.close(); } }
package app.source;

import bean.Pair;
import bean.LSException;
import bean.Source;
import org.apache.log4j.Logger;
import org.nutz.ssdb4j.SSDBs;
import org.nutz.ssdb4j.spi.Response;
import org.nutz.ssdb4j.spi.SSDB;
import util.SqlliteUtil;

import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ArrayBlockingQueue;

/**
 * Not thread-safe.
 * <p>
 * Keeps the connection open and pulls data continuously, blocking when no data
 * is available; the point marker is persisted every 10000 records, so after an
 * abnormal disconnect a restart can resume [an abnormal disconnect may lose data].
 */
@Deprecated
public class SsdbPull extends Thread {

    private final Logger logger = Logger.getLogger(SsdbPull.class);

    private final int limit = 500;

    private Object point;

    private String ip;
    private int port;
    private String name;
    private Source.Type type;

    public final int timeout;

    private final int waitMills = 10000; //need lower than timeout

    private String indexName;

    private boolean stop = false;

    public final ArrayBlockingQueue<Pair<Object, String>> queue = new ArrayBlockingQueue<>(limit * 2);

    public SsdbPull(String ip, int port, String name, Source.Type type, String indexName) {
        this(ip, port, name, type, indexName, 15000);
    }

    private SsdbPull(String ip, int port, String name, Source.Type type, String indexName, int timeout) {
        this.ip = ip;
        this.port = port;
        this.name = name;
        this.type = type;
        this.indexName = indexName;
        this.timeout = timeout;
    }

    /**
     * Creates a single connection
     *
     * @return SSDB {@link org.nutz.ssdb4j.spi.SSDB}
     */
    private SSDB connect() {
        return SSDBs.simple(ip, port, timeout);
    }

    private void poll() throws LSException {
        logger.debug("polling ssdb." + name + " data to index");
        initPoint();
        try (SSDB ssdb = this.connect()) {
            // int remaining;
            // do {
            //     long start = System.currentTimeMillis();
            //     List<ImmutablePair<Object, String>> pairs = pollOnce(ssdb);
            //     if (!pairs.isEmpty()) for (ImmutablePair<Object, String> pair : pairs) {
            //         queue.put(pair);
            //     }
            //     remaining = pairs.size();
            //     long end = System.currentTimeMillis();
            //     logger.debug("pollOnce[" + pairs.size() + "] cost time[" + (end - start) + "] mills");
            // } while (remaining > 0);
            int counter = 0;
            while (!stop) {
                long start = System.currentTimeMillis();
                List<Pair<Object, String>> pairs = pollOnce(ssdb);
                if (!pairs.isEmpty()) for (Pair<Object, String> pair : pairs) {
                    queue.put(pair);
                }
                counter += pairs.size();
                long end = System.currentTimeMillis();
                logger.debug("pollOnce[" + pairs.size() + "] cost time[" + (end - start) + "] mills");
                if (pairs.size() == 0) Thread.sleep(waitMills);
                if (counter >= 10000) {
                    SqlliteUtil.getInstance().update("update ssdb set point=? where name=?", point, indexName);
                    counter = 0;
                }
            }
            SqlliteUtil.getInstance().update("update ssdb set point=? where name=?", point, indexName);
        } catch (LSException | SQLException e) {
            logger.warn(e.getMessage(), e);
        } catch (Exception e) {
            throw new LSException("poll ssdb." + name + " error", e);
        }
        logger.debug("ssdb pull has stopped...");
    }

    /* String createSql = "CREATE TABLE ssdb(" +
            "name TEXT PRIMARY KEY NOT NULL, " +
            "point TEXT NOT NULL," +
            "pid INT DEFAULT 0" +
            ")";*/
    private void initPoint() throws LSException {
        try {
            List<Object> points = SqlliteUtil.getInstance().queryL(
                    "select point from ssdb where name=?", indexName);
            String _point = String.valueOf(points.get(0));
            switch (type) {
                case LIST:
                    point = Integer.parseInt(_point);
                    break;
                case HASH:
                    point = _point;
                    break;
                default:
                    throw new LSException("ssdb type [" + type + "] is not support...");
            }
        } catch (Exception e) {
            throw new LSException("error initializing point info of ssdb." + name, e);
        }
    }

    private List<Pair<Object, String>> pollOnce(SSDB ssdb) throws LSException {
        switch (type) {
            case LIST:
                return listScan(ssdb, (int) point);
            case HASH:
                return hashScan(ssdb, (String) point);
            default:
                throw new LSException("ssdb type [" + type + "] is not support...");
        }
    }

    private List<Pair<Object, String>> listScan(SSDB ssdb, int offset) {
        Response response = ssdb.qrange(name, offset, limit);
        if (response.datas.size() == 0) return Collections.emptyList();
        else {
            // response.datas.parallelStream()
            //         .map(v -> new String(v, SSDBs.DEFAULT_CHARSET))
            //         .collect(Collectors.toList());
            List<Pair<Object, String>> list = new ArrayList<>(response.datas.size());
            for (int i = 0; i < response.datas.size(); i++) {
                Pair<Object, String> pair = Pair.of(offset + i,
                        new String(response.datas.get(i), SSDBs.DEFAULT_CHARSET));
                list.add(pair);
            }
            point = offset + response.datas.size();
            return list;
        }
    }

    private List<Pair<Object, String>> hashScan(SSDB ssdb, String key_start) {
        Response response = ssdb.hscan(name, key_start, "", limit);
        if (response.datas.size() == 0) return Collections.emptyList();
        else {
            List<Pair<Object, String>> list = new ArrayList<>(response.datas.size());
            for (int i = 0; i < response.datas.size(); i += 2) {
                String key = new String(response.datas.get(i), SSDBs.DEFAULT_CHARSET);
                if (i == response.datas.size() - 2) point = key;
                Pair<Object, String> pair = Pair.of(key,
                        new String(response.datas.get(i + 1), SSDBs.DEFAULT_CHARSET));
                list.add(pair);
            }
            return list;
        }
    }

    /**
     * When an object implementing interface <code>Runnable</code> is used
     * to create a thread, starting the thread causes the object's
     * <code>run</code> method to be called in that separately executing
     * thread.
     * <p>
     * The general contract of the method <code>run</code> is that it may
     * take any action whatsoever.
     *
     * @see Thread#run()
     */
    @Override
    public void run() {
        try {
            poll();
        } catch (LSException e) {
            logger.error(e.getCause() == null ? e.getMessage() : e.getCause());
        }
    }

    public void close() {
        logger.debug("stop ssdb pull...");
        stop = true;
        try {
            Thread.sleep(waitMills + 10);
        } catch (InterruptedException ignore) {
        }
    }
}
/**
 * @author Sebastian Baechle
 */
public class AbstractBPlusIndexTest {

    private static final Logger log = Logger
            .getLogger(AbstractBPlusIndexTest.class.getName());

    protected static final int LOAD_SIZE = 20000;

    protected static final int REDUCED_LOAD_SIZE = LOAD_SIZE / 4;

    protected static final int NUMBER_OF_DUPLICATES = 10;

    protected BPlusIndex index;

    protected PageID uniqueRootPageID;

    protected PageID nonuniqueRootPageID;

    protected Random rand;

    protected Tx t1;

    protected Tx t2;

    protected IndexPageHelper indexPageHelper;

    protected SysMockup sm;

    protected final class Entry {
        final byte[] key;
        final byte[] value;

        Entry(byte[] key, byte[] value) {
            this.key = key;
            this.value = value;
        }

        @Override
        public boolean equals(Object obj) {
            return ((obj instanceof Entry)
                    && (Field.UINTEGER.compare(((Entry) obj).key, key) == 0)
                    && (Field.UINTEGER.compare(((Entry) obj).value, value) == 0));
        }

        @Override
        public int hashCode() {
            return Field.UINTEGER.toString(key).hashCode();
        }
    }

    public AbstractBPlusIndexTest() {
        super();
    }

    protected String number(int i) {
        if (i < 10)
            return "00" + i;
        else if (i < 100)
            return "0" + i;
        else
            return "" + i;
    }

    protected void loadIndex(Tx transaction, List<Entry> entries,
            PageID rootPageID) throws IndexAccessException,
            IndexOperationException {
        int i = 0;
        for (Entry entry : entries) {
            index.insert(transaction, rootPageID, entry.key, entry.value);
            // printIndex(t2, nonuniqueRootPageID, "/media/ramdisk/" +
            // number(i++) + ".dot", true);
            // IndexPageHelper.checkIndexConsistency(transaction, sm.buffer,
            // rootPageID);
            assertEquals("fixed pages after insert", 0, sm.buffer.getFixCount());
            byte[] readValue = index.read(transaction, rootPageID, entry.key);
            assertTrue("value for inserted key found", (readValue != null));
            // assertTrue("read value is same as written value",
            // (Field.UINTEGER.compare(readValue, entry.value) == 0));
        }
        // printIndex(t2, nonuniqueRootPageID, "/media/ramdisk/loaded.dot",
        // true);
        indexPageHelper.checkIndexConsistency(transaction, sm.buffer,
                rootPageID);
    }

    protected LinkedList<Entry> generateEntries(int loadSize, int valueAddon) {
        LinkedList<Entry> entries = new LinkedList<Entry>();
        for (int i = 1; i <= loadSize; i++) {
            int intKey = i * 100;
            byte[] key = Calc.fromUIntVar(intKey);
            int intValue = i * 100 + valueAddon;
            byte[] value = Calc.fromUIntVar(intValue);
            entries.add(new Entry(key, value));
            Calc.fromUIntVar(intKey);
        }
        return entries;
    }

    protected LinkedList<Entry> generateDuplicates(Entry entry,
            int numberOfDuplicates) {
        LinkedList<Entry> entries = new LinkedList<Entry>();
        int intValue = Calc.toUIntVar(entry.value);
        for (int i = 1; i <= numberOfDuplicates; i++) {
            intValue++;
            byte[] value = Calc.fromUIntVar(intValue);
            entries.add(new Entry(entry.key, value));
        }
        return entries;
    }

    protected void printIndex(Tx transaction, PageID rootPageID,
            String filename, boolean showValues) throws IndexAccessException {
        try {
            PrintStream printer = new PrintStream(new File(filename));
            index.traverse(transaction, rootPageID,
                    new DisplayVisitor(printer, showValues));
            printer.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    @After
    public void tearDown() throws BufferException {
    }

    @Before
    public void setUp() throws Exception {
        sm = new SysMockup();
        index = new BPlusIndex(sm.bufferManager);
        indexPageHelper = new IndexPageHelper(sm.bufferManager);
        t1 = sm.taMgr.begin();
        t2 = sm.taMgr.begin();
        uniqueRootPageID = index.createIndex(t2, SysMockup.CONTAINER_NO,
                Field.UINTEGER, Field.UINTEGER, true, true);
        nonuniqueRootPageID = index.createIndex(t2, SysMockup.CONTAINER_NO,
                Field.UINTEGER, Field.UINTEGER, false, true);
        // use the same random source to get reproducible results in case of an
        // error
        rand = new Random(12345678);
        IDAssigner.counter.set(0);
    }
}
fn tio(i: i32) -> i32 {
    if mut (i < 50) {
        return tio(i + *1);
    } else {
        return mut i;
    }
}

fn main() {
    let mut a: i32 = mut 2;
    tio(&2, - &mut 3);
    while mut true {
    }
}
import * as React from 'react'; import { Component } from 'react'; import { Switch, Route, Redirect } from 'react-router'; import routes from './constants/routes'; import sizes from './constants/sizes'; import App from './containers/App'; import HomePage from './containers/HomePage'; import DesignerPage from './containers/DesignerPage'; import DataStore from './classes/DataStore'; import IpcInterface from './classes/IpcInterface'; import { DndProvider } from 'react-dnd'; import { HTML5Backend } from 'react-dnd-html5-backend'; export default class Routes extends Component { private dataStore: DataStore = new DataStore(); private ipcInterface: IpcInterface = new IpcInterface(); render() { return ( <App> <Switch> <Route path={routes.HOME} component={() => { this.ipcInterface.resizeWindow( sizes.homeWindow.width, sizes.homeWindow.height ); return <HomePage dataStore={this.dataStore} />; }} /> <Route path={routes.DESIGNER} component={() => { this.ipcInterface.resizeWindow( sizes.designerWindow.width, sizes.designerWindow.height ); return ( <DndProvider backend={HTML5Backend}> <DesignerPage dataStore={this.dataStore} /> </DndProvider> ); }} /> <Redirect from="/" to="/home" /> </Switch> </App> ); } }
# Reads two integers n and k from stdin and prints ceil(n / k).
print((lambda x: (x[0]-1)//x[1]+1)(list(map(int,input().split()))))
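A minimal sanity check of the identity the one-liner above relies on (an editorial sketch, not part of the original file; assumes positive integers):

import math

# (n - 1) // k + 1 equals math.ceil(n / k) for all positive integers n, k.
for n in range(1, 50):
    for k in range(1, 10):
        assert (n - 1) // k + 1 == math.ceil(n / k)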
#!/usr/bin/env python # -*- coding: utf-8 -*- """Main module.""" try: import configparser except ImportError: # Python 2 import ConfigParser as configparser from collections import namedtuple from datetime import datetime import logging import os import time from distroinfo import info, query from git_wrapper import exceptions as git_exceptions from git_wrapper.repo import GitRepo LOGGER = logging.getLogger("patch_rebaser") def find_patches_branch(repo, remote, distgit_branch): """Guess patches branch name""" # Adapted from rdopkg guess.find_patches_branch parts = distgit_branch.split('-') while parts: if 'trunk' in distgit_branch: branch = '%s-trunk-patches' % '-'.join(parts) else: branch = '%s-patches' % '-'.join(parts) LOGGER.debug("Checking if branch %s exists...", branch) if repo.branch.exists(branch, remote): return branch parts.pop() return None def create_patches_branch(repo, commit, remote, dev_mode=True): """Create new patches branch from commit""" branch_name = os.environ.get('PATCHES_BRANCH', None) if not branch_name: LOGGER.error("No PATCHES_BRANCH env var found, cannot create branch") return None if not repo.branch.create(branch_name, commit): LOGGER.error("Failed to create -patches branch") return None # Switch to the newly created branch # FIXME(jpena): Maybe include this in git_wrapper? repo.git.checkout(branch_name) _rebuild_gitreview(repo, remote, branch_name) # Finally, push branch before returning its name if dev_mode: LOGGER.warning("Dev mode: executing push commands in dry-run mode") repo.git.push("-n", remote, branch_name) else: timestamp = datetime.now().strftime("%Y%m%d%H%M%S") LOGGER.warning( "Pushing {branch} to {remote} ({timestamp})".format( branch=branch_name, remote=remote, timestamp=timestamp ) ) repo.git.push(remote, branch_name) return branch_name def get_downstream_distgit_branch(dlrn_projects_ini): """Get downstream distgit branch info from DLRN projects.ini""" config = configparser.ConfigParser() config.read(dlrn_projects_ini) return config.get('downstream_driver', 'downstream_distro_branch') def get_patches_branch(repo, remote, dlrn_projects_ini): """Get the patches branch name""" # Get downstream distgit branch from DLRN config distgit_branch = get_downstream_distgit_branch(dlrn_projects_ini) # Guess at patches branch based on the distgit branch name return find_patches_branch(repo, remote, distgit_branch) def parse_distro_info_path(path): """Break distro_info path into repo + file""" path = path.strip().rsplit("/", 1) info_repo = path[0] info_file = path[1] remote = False if info_repo.startswith("http"): remote = True return info_file, info_repo, remote def parse_gerrit_remote_url(url): """Break Gerrit remote url into host, port and project""" # We are expecting a remote URL in the format # protocol://host:port/project split_url = url.split('/') project = '/'.join(split_url[3:]) # The project part can contain slashes host_port = split_url[2].split(':') host = host_port[0] if len(host_port) > 1: port = host_port[1] else: port = '29418' # Default Gerrit port return host, port, project def get_distro_info(distroinfo_repo): """Set up distro_info based on path""" info_file, info_repo, remote = parse_distro_info_path(distroinfo_repo) if remote: di = info.DistroInfo(info_file, remote_git_info=info_repo) else: di = info.DistroInfo(info_file, local_info=info_repo) return di.get_info() def get_patches_repo(distroinfo_repo, pkg_name, key): """Get URL of repo with the patches branch""" distro_info = get_distro_info(distroinfo_repo) pkg = 
query.get_package(distro_info, pkg_name) repo = pkg.get(key) if not repo: LOGGER.warning("No %s repo listed for package %s", key, pkg_name) return repo def get_rebaser_configparser(defaults=None): """Return a configparser object for patch_rebaser config""" # Get the config file location based on path of currently running script config_file = os.path.realpath( "{0}/{1}".format( os.path.dirname(os.path.realpath(__file__)), "patch_rebaser.ini" ) ) if not os.path.exists(config_file): raise Exception( "Configuration file {0} not found.".format(config_file) ) parser = configparser.ConfigParser(defaults) parser.read(config_file) return parser def get_rebaser_config(defaults=None): """Return a tuple with the configuration information. If the configuration is missing an option, the value from the defaults dictionary is used. If the defaults doesn't contain the value either, a configparser.NoOptionError exception is raised. :param dict defaults: Default values for config values """ default_options = ['dev_mode', 'remote_name', 'git_name', 'git_email', 'packages_to_process', 'dlrn_projects_ini', 'create_patches_branch'] distroinfo_options = ['patches_repo_key'] RebaserConfig = namedtuple('RebaserConfig', default_options + distroinfo_options) parser = get_rebaser_configparser(defaults) options = {} for opt in default_options: if opt == 'dev_mode' or opt == 'create_patches_branch': options[opt] = parser.getboolean('DEFAULT', opt) elif opt == 'packages_to_process': pkgs = parser.get('DEFAULT', opt) if pkgs: pkgs = pkgs.split(",") if "," in pkgs else [pkgs] options[opt] = pkgs else: options[opt] = parser.get('DEFAULT', opt) if not parser.has_section('distroinfo'): parser.add_section('distroinfo') options['patches_repo_key'] = parser.get('distroinfo', 'patches_repo_key') return RebaserConfig(**options) def set_up_git_config(name, email): """Set up environment variables for git author and committer info. This is a pre-requisite for performing a git rebase operation. """ os.environ["GIT_AUTHOR_NAME"] = name os.environ["GIT_AUTHOR_EMAIL"] = email os.environ["GIT_COMMITTER_NAME"] = name os.environ["GIT_COMMITTER_EMAIL"] = email def generate_gitreview(path, project, host, port, branch, remote): """Write a new .gitreview file to disk. 
:param str path: Directory where the file will be written :param str project: Name of the project :param str host: Gerrit host :param str port: Port for Gerrit host :param str branch: Git branch to use as defaultbranch :param str remote: Git remote to use as defaultremote """ with open(os.path.join(path, '.gitreview'), 'w') as fp: fp.write("[gerrit]\n") fp.write("host=%s\n" % host) fp.write("port=%s\n" % port) fp.write("project=%s.git\n" % project) fp.write("defaultbranch=%s\n" % branch) fp.write("defaultremote=%s\n" % remote) fp.write("defaultrebase=1\n") def _rebuild_gitreview(repo, remote, branch): dlrn = get_dlrn_variables() url = repo.repo.remote(remote).url if isinstance(url, list): url = url[0] host, port, project = parse_gerrit_remote_url(url) generate_gitreview(dlrn.local_repo, project, host, port, branch, remote) # Now push the change repo.commit.commit('RHOS: use internal gerrit - DROP-IN-RPM\n\n' 'Change-Id: I400187d0e03127743aad09d859988991' 'e965ff7e') class Rebaser(object): def __init__(self, repo, branch, commit, remote, timestamp, dev_mode=True, max_retries=3): """Initialize the Rebaser :param git_wrapper.GitRepo repo: An initialized GitWrapper repo :param str branch: Branch name to rebase (same name on local and remote) :param str commit: Commit sha to rebase to :param str remote: Remote name to use as base and push rebase result to :param str timestamp: Timestamp used in tag to previous remote HEAD :param bool dev_mode: Whether to run the push commands as dry-run only :param int max_retries: How many retry attempts if remote changed during rebase """ self.repo = repo self.branch = branch self.commit = commit self.remote = remote self.timestamp = timestamp self.tag_name = "private-rebaser-{0}-previous".format(timestamp) self.dev_mode = dev_mode self.max_retries = max_retries self.remote_branch = "{0}/{1}".format(self.remote, self.branch) def rebase_and_update_remote(self): """Rebase the local branch to the specific commit & push the result.""" self.repo.remote.fetch(self.remote) # Reset the local branch to the latest self.repo.branch.create(self.branch, self.remote_branch, reset_if_exists=True) # Tag the previous branch's HEAD, before rebase self.repo.tag.create(self.tag_name, self.remote_branch) # Rebase self.perform_rebase() # Check if any new changes have come in self.repo.remote.fetch(self.remote) if not self.repo.commit.same(self.tag_name, self.remote_branch): if self.max_retries > 0: LOGGER.info("Remote changed during rebase. Remaining " "attempts: %s", self.max_retries) time.sleep(20) self.max_retries -= 1 # We'll need to move the tag to the new HEAD self.repo.tag.delete(self.tag_name) self.rebase_and_update_remote() else: # The remote changed several times while we were trying # to push the rebase result back. We stop trying for # now but leave the rebase results as is, so that the # build is still as up-to-date as can be. The patches # branch will be temporarily out of date, but that will # be corrected during the next Rebaser run. LOGGER.warning( "Remote changed multiple times during rebase, not pushing." " The build will include the current rebase result." 
) else: # No new stuff, push it on self.update_remote_patches_branch() def perform_rebase(self): """Rebase the specific local branch to the specific commit.""" rebase_done = False while not rebase_done: try: LOGGER.info("Rebasing %s to %s", self.branch, self.commit) self.repo.branch.rebase_to_hash(self.branch, self.commit) rebase_done = True except git_exceptions.RebaseException as e: if not self.try_automated_rebase_fix(e): LOGGER.info("Could not rebase. Cleaning up.") self.repo.branch.abort_rebase() raise def try_automated_rebase_fix(self, exception): """Try to automatically fix a failed rebase. There will be a number of rebase failures we can try to fix in an automated way. Initially, failures related to the .gitreview file will be attempted. """ if '.gitreview' in str(exception): LOGGER.warning("A patch including the .gitreview file failed " "to rebase, skipping it.") try: self.repo.git.rebase('--skip') _rebuild_gitreview(self.repo, self.remote, self.branch) return True except Exception as e: LOGGER.error("Failed to fix rebase error: %s" % e) return False return False def update_remote_patches_branch(self): """Force push local patches branch to the remote repository. Also push the tag of what was the previous head of that remote branch. If in dev mode, the pushes are done in dry-run mode (-n). """ # Do we need to push? If there are no changes between the remote # and the local branch, just delete the local tag and move on. if not self.repo.branch.cherry_on_head_only( self.remote_branch, self.branch): self.repo.tag.delete(self.tag_name) return if self.dev_mode: LOGGER.warning("Dev mode: executing push commands in dry-run mode") self.repo.git.push("-n", self.remote, self.tag_name) self.repo.git.push("-nf", self.remote, self.branch) else: LOGGER.warning( "Force-pushing {branch} to {remote} ({timestamp})".format( branch=self.branch, remote=self.remote, timestamp=self.timestamp ) ) self.repo.git.push(self.remote, self.tag_name) self.repo.git.push("-f", self.remote, self.branch) def get_dlrn_variables(): """Return environment variables that are set by DLRN""" DLRNConfig = namedtuple( 'DLRNConfig', ['user', 'local_repo', 'commit', 'distroinfo_repo', 'pkg_name'] ) return DLRNConfig( os.environ['DLRN_USER'], os.environ['DLRN_SOURCEDIR'], os.environ['DLRN_SOURCE_COMMIT'], os.environ['DLRN_DISTROINFO_REPO'], os.environ['DLRN_PACKAGE_NAME'] ) def main(): dlrn = get_dlrn_variables() # Default values for options in patch_rebaser.ini defaults = { 'remote_name': 'remote_name', 'git_name': '<NAME>', 'git_email': '<EMAIL>', 'packages_to_process': '', 'dlrn_projects_ini': ( '/usr/local/share/dlrn/{0}/projects.ini'.format(dlrn.user)), 'dev_mode': 'true', 'patches_repo_key': 'patches', 'create_patches_branch': 'false' } config = get_rebaser_config(defaults) if config.packages_to_process and \ dlrn.pkg_name not in config.packages_to_process: LOGGER.info( "Skipping %s, as package not in list of packages_to_process", dlrn.pkg_name ) return set_up_git_config(config.git_name, config.git_email) repo = GitRepo(dlrn.local_repo) # Create a remote for the patches branch patches_repo = get_patches_repo( dlrn.distroinfo_repo, dlrn.pkg_name, config.patches_repo_key ) if not patches_repo: return if config.remote_name not in repo.remote.names(): if not repo.remote.add(config.remote_name, patches_repo): raise Exception( "Could not add remote {0} ({1})".format(config.remote_name, patches_repo) ) repo.remote.fetch_all() # Create local patches branch branch_name = get_patches_branch(repo, config.remote_name, 
                                     config.dlrn_projects_ini)

    # Not every project has a -patches branch for every release
    if not branch_name:
        if config.create_patches_branch:
            LOGGER.warning('Patches branch does not exist, creating it')
            branch_name = create_patches_branch(
                repo, dlrn.commit, config.remote_name,
                dev_mode=config.dev_mode)
            if not branch_name:
                raise Exception('Could not create -patches branch')
        else:
            LOGGER.warning('Patches branch does not exist, not creating it')
            # We do not need to rebase now, since the branch was
            # created using the upstream commit as HEAD
            return

    # Timestamp that will be used to tag the previous branch tip
    timestamp = datetime.now().strftime("%Y%m%d%H%M%S")

    # Perform rebase & force push result
    rebaser = Rebaser(repo,
                      branch_name,
                      dlrn.commit,
                      config.remote_name,
                      timestamp,
                      config.dev_mode)
    rebaser.rebase_and_update_remote()


if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        LOGGER.error(e)
        raise
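A quick illustration of the parse_gerrit_remote_url helper defined earlier in this module (an editorial sketch; the host and project names are hypothetical):

host, port, project = parse_gerrit_remote_url(
    "ssh://gerrit.example.com:29418/openstack/nova")
assert host == "gerrit.example.com"
assert port == "29418"
assert project == "openstack/nova"

# When no port is given, the Gerrit default 29418 is assumed.
host, port, project = parse_gerrit_remote_url(
    "https://gerrit.example.com/openstack/nova")
assert port == "29418"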
// src/main/java/com/yoya/net/impl/netty/NettyLogHandler.java
/*
 * Copyright (c) 2016, baihw (<EMAIL>).
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and limitations under the License.
 *
 */
package com.yoya.net.impl.netty;

import static io.netty.buffer.ByteBufUtil.appendPrettyHexDump;
import static io.netty.util.internal.StringUtil.NEWLINE;

import java.net.SocketAddress;

import com.yoya.rdf.log.ILog;
import com.yoya.rdf.log.LogManager;

import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufHolder;
import io.netty.channel.Channel;
import io.netty.channel.ChannelDuplexHandler;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelPromise;

/**
 * Created by baihw on 16-5-27.
 *
 * Log handler for netty communication events.
 */
final class NettyLogHandler extends ChannelDuplexHandler{

	// Logger instance.
	private static final ILog _LOG = LogManager.getLog( "NettyLogHandler" );

	@Override
	public void channelRegistered( ChannelHandlerContext ctx ) throws Exception{
		_LOG.info( contextFormat( ctx ) );
		super.channelRegistered( ctx );
	}

	@Override
	public void channelUnregistered( ChannelHandlerContext ctx ) throws Exception{
		_LOG.info( contextFormat( ctx ) );
		super.channelUnregistered( ctx );
	}

	@Override
	public void channelActive( ChannelHandlerContext ctx ) throws Exception{
		_LOG.info( contextFormat( ctx ) );
		super.channelActive( ctx );
	}

	@Override
	public void channelInactive( ChannelHandlerContext ctx ) throws Exception{
		_LOG.info( contextFormat( ctx ) );
		super.channelInactive( ctx );
	}

	@Override
	public void userEventTriggered( ChannelHandlerContext ctx, Object evt ) throws Exception{
		_LOG.info( contextFormat( ctx ) );
		super.userEventTriggered( ctx, evt );
	}

	@Override
	public void bind( ChannelHandlerContext ctx, SocketAddress localAddress, ChannelPromise future ) throws Exception{
		_LOG.info( contextFormat( ctx ) );
		super.bind( ctx, localAddress, future );
	}

	@Override
	public void connect( ChannelHandlerContext ctx, SocketAddress remoteAddress, SocketAddress localAddress, ChannelPromise future ) throws Exception{
		_LOG.info( contextFormat( ctx ) );
		super.connect( ctx, remoteAddress, localAddress, future );
	}

	@Override
	public void disconnect( ChannelHandlerContext ctx, ChannelPromise future ) throws Exception{
		_LOG.info( contextFormat( ctx ) );
		super.disconnect( ctx, future );
	}

	@Override
	public void deregister( ChannelHandlerContext ctx, ChannelPromise future ) throws Exception{
		_LOG.info( contextFormat( ctx ) );
		super.deregister( ctx, future );
	}

	@Override
	public void close( ChannelHandlerContext ctx, ChannelPromise future ) throws Exception{
		_LOG.info( contextFormat( ctx ) );
		super.close( ctx, future );
	}

	@Override
	public void exceptionCaught( ChannelHandlerContext ctx, Throwable cause ) throws Exception{
		_LOG.error( contextFormat( ctx ).concat( ",exception: " ).concat( cause.toString() ) );
		super.exceptionCaught( ctx, cause );
	}

//	@Override
//	public void channelRead( ChannelHandlerContext ctx, Object msg ) throws Exception{
//		_LOG.debug( String.format( "channel: %s, msg: %s", ctx.channel(), formatMessage( "read", msg ) ) );
//		super.channelRead( ctx, msg );
//	}
//
//	@Override
//	public void write( ChannelHandlerContext ctx, Object msg, ChannelPromise promise ) throws Exception{
//		_LOG.debug( String.format( "channel: %s, msg: %s", ctx.channel(), formatMessage( "write", msg ) ) );
//		super.write( ctx, msg, promise );
//	}
//
//	@Override
//	public void flush( ChannelHandlerContext ctx ) throws Exception{
//		_LOG.debug( String.format( "channel: %s", ctx.channel() ) );
//		super.flush( ctx );
//	}

	/**
	 * Format the channel context for logging.
	 *
	 * @param ctx the channel handler context
	 * @return the final message text
	 */
	private String contextFormat( ChannelHandlerContext ctx ){
		Channel ch = ctx.channel();
		StringBuilder sb = new StringBuilder();
		sb.append( "channel:" );
		sb.append( "{local:" ).append( ch.localAddress() );
		sb.append( ",remote:" ).append( ch.remoteAddress() );
		sb.append( "}" );
		return sb.toString();
	}

	protected String formatMessage( String eventName, Object msg ){
		if( msg instanceof ByteBuf ){
			return formatByteBuf( eventName, ( ByteBuf )msg );
		}else if( msg instanceof ByteBufHolder ){
			return formatByteBufHolder( eventName, ( ByteBufHolder )msg );
		}else{
			return formatNonByteBuf( eventName, msg );
		}
	}

	private String formatByteBuf( String eventName, ByteBuf msg ){
		int length = msg.readableBytes();
		if( length == 0 ){
			StringBuilder buf = new StringBuilder( eventName.length() + 4 );
			buf.append( eventName ).append( ": 0B" );
			return buf.toString();
		}else{
			// One hex-dump row covers 16 bytes; add a row for any remainder.
			int rows = length / 16 + ( length % 16 == 0 ? 0 : 1 ) + 4;
			StringBuilder buf = new StringBuilder( eventName.length() + 2 + 10 + 1 + 2 + rows * 80 );
			buf.append( eventName ).append( ": " ).append( length ).append( 'B' ).append( NEWLINE );
			appendPrettyHexDump( buf, msg );
			return buf.toString();
		}
	}

	private String formatNonByteBuf( String eventName, Object msg ){
		return eventName + ": " + msg;
	}

	private String formatByteBufHolder( String eventName, ByteBufHolder msg ){
		String msgStr = msg.toString();
		ByteBuf content = msg.content();
		int length = content.readableBytes();
		if( length == 0 ){
			StringBuilder buf = new StringBuilder( eventName.length() + 2 + msgStr.length() + 4 );
			buf.append( eventName ).append( ", " ).append( msgStr ).append( ", 0B" );
			return buf.toString();
		}else{
			// One hex-dump row covers 16 bytes; add a row for any remainder.
			int rows = length / 16 + ( length % 16 == 0 ? 0 : 1 ) + 4;
			StringBuilder buf = new StringBuilder( eventName.length() + 2 + msgStr.length() + 2 + 10 + 1 + 2 + rows * 80 );
			buf.append( eventName ).append( ": " ).append( msgStr ).append( ", " ).append( length ).append( 'B' ).append( NEWLINE );
			appendPrettyHexDump( buf, content );
			return buf.toString();
		}
	}
}
// end class
module Utils
  ( tickerLabelModifier
  , globalDataLabelModifier
  ) where

import Data.Char (toLower)
import Data.List (isPrefixOf)
import Data.List.Utils (replace)
import Text.Casing (fromHumps, toSnake)

tickerLabelModifier :: String -> String
tickerLabelModifier label =
  let label' = modifier label
  in case label' of
       "24_hvolume_usd"   -> "24h_volume_usd"
       "percent_change1h" -> "percent_change_1h"
       "percent_change24h" -> "percent_change_24h"
       "percent_change7d" -> "percent_change_7d"
       _ -> label'

globalDataLabelModifier :: String -> String
globalDataLabelModifier label
  | prefix `isPrefixOf` label' = replace prefix "total_24" label'
  | otherwise = label'
  where
    label' = modifier label
    prefix = "total24"

modifier :: String -> String
modifier = (map toLower) . toSnake . fromHumps . (drop 1)
import styled from "styled-components"; const Button = styled.button` height: 2em; padding: 0 0.8em; border-radius: 0.3em; `; export const PrimaryButton = styled(Button)` color: ${(props) => props.theme.color.textOnMain}; background-color: ${(props) => props.theme.color.main}; `; export const NegativeButton = styled(Button)` color: ${(props) => props.theme.color.moderate}; background-color: transparent; border: 1px solid ${(props) => props.theme.color.moderate}; `;
// ---------------------------------------------------------------------------------------
// Shader Model 4.0 / 4.1
// ---------------------------------------------------------------------------------------
// CalculateLevelOfDetail (DirectX HLSL Texture Object)
// http://msdn.microsoft.com/en-us/library/windows/desktop/bb944001%28v=vs.85%29.aspx
// Gather (DirectX HLSL Texture Object)
// http://msdn.microsoft.com/en-us/library/windows/desktop/bb944003%28v=VS.85%29.aspx
// GetDimensions (DirectX HLSL Texture Object)
// http://msdn.microsoft.com/en-us/library/bb509693%28v=VS.85%29.aspx
// GetSamplePosition (DirectX HLSL Texture Object)
// http://msdn.microsoft.com/en-us/library/bb944004%28v=VS.85%29.aspx
// Load (DirectX HLSL Texture Object)
// http://msdn.microsoft.com/en-us/library/bb509694%28v=VS.85%29.aspx
// Sample (DirectX HLSL Texture Object)
// http://msdn.microsoft.com/en-us/library/bb509695%28v=VS.85%29.aspx
// SampleBias (DirectX HLSL Texture Object)
// http://msdn.microsoft.com/en-us/library/bb944005%28v=VS.85%29.aspx
// SampleCmp (DirectX HLSL Texture Object)
// http://msdn.microsoft.com/en-us/library/bb509696%28v=VS.85%29.aspx
// SampleGrad (DirectX HLSL Texture Object)
// http://msdn.microsoft.com/en-us/library/bb509698%28v=VS.85%29.aspx
// SampleLevel (DirectX HLSL Texture Object)
// http://msdn.microsoft.com/en-us/library/bb509699%28v=VS.85%29.aspx

void GroupMemoryBarrierWithGroupSync();

class __Texture1D<T>
{
	// SM 4.0
	float CalculateLevelOfDetail( sampler_state s, float1 x);
	void GetDimensions( uint MipLevel, out uint Width, out uint NumberOfLevels);
	void GetDimensions( out uint Width);
	void GetDimensions( uint MipLevel, out float Width, out float NumberOfLevels);
	void GetDimensions( out float Width);
	T Load(int2 Location);
	T Load(int2 Location, int Offset);
	float4 Sample(sampler_state S, float Location);
	float4 Sample(sampler_state S, float Location, int Offset);
	float4 SampleBias(sampler_state S, float Location, float Bias);
	float4 SampleBias(sampler_state S, float Location, float Bias, int Offset);
	float SampleCmp(sampler_state S, float Location, float CompareValue);
	float SampleCmp(sampler_state S, float Location, float CompareValue, int Offset);
	float SampleCmpLevelZero(sampler_state S, float Location, float CompareValue);
	float SampleCmpLevelZero(sampler_state S, float Location, float CompareValue, int Offset);
	float4 SampleGrad(sampler_state S, float Location, float DDX, float DDY);
	float4 SampleGrad(sampler_state S, float Location, float DDX, float DDY, int Offset);
	float4 SampleLevel( sampler_state S, float Location, float LOD);
	float4 SampleLevel( sampler_state S, float Location, float LOD, int Offset);

	// SM 5.0
	T mips.operator[][](in uint mipSlice,in uint pos);
	T operator[](in uint pos);
};

class __Texture1DArray<T>
{
	// SM 4.0
	float CalculateLevelOfDetail( sampler_state s, float1 x);
	void GetDimensions( uint MipLevel, out uint Width, out uint Elements, out uint NumberOfLevels);
	void GetDimensions( out uint Width, out uint Elements);
	void GetDimensions( uint MipLevel, out float Width, out float Elements, out float NumberOfLevels);
	void GetDimensions( out float Width, out float Elements);
	T Load(int3 Location);
	T Load(int3 Location, int Offset);
	float4 Sample(sampler_state S, float2 Location);
	float4 Sample(sampler_state S, float2 Location, int Offset);
	float4 SampleBias(sampler_state S, float2 Location, float Bias);
	float4 SampleBias(sampler_state S, float2 Location, float Bias, int Offset);
	float SampleCmp(sampler_state S, float2 Location,
float CompareValue); float SampleCmp(sampler_state S, float2 Location, float CompareValue, int Offset); float SampleCmpLevelZero(sampler_state S, float2 Location, float CompareValue); float SampleCmpLevelZero(sampler_state S, float2 Location, float CompareValue, int Offset); float4 SampleGrad(sampler_state S, float2 Location, float DDX, float DDY); float4 SampleGrad(sampler_state S, float2 Location, float DDX, float DDY, int Offset); float4 SampleLevel( sampler_state S, float2 Location, float LOD); float4 SampleLevel( sampler_state S, float2 Location, float LOD, int Offset); // SM 5.0 T mips.operator[][](in uint mipSlice,in uint2 pos); T operator[](in uint2 pos); }; class __Texture2D<T> { // SM 4.0 float CalculateLevelOfDetail( sampler_state s, float2 x); vector<__T_base,4> Gather( sampler_state S, float2 Location); vector<__T_base,4> Gather( sampler_state S, float2 Location, int2 Offset ); void GetDimensions( uint MipLevel, out uint Width, out uint Height, out uint NumberOfLevels); void GetDimensions( out uint Width, out uint Height); void GetDimensions( uint MipLevel, out float Width, out float Height, out float NumberOfLevels); void GetDimensions( out float Width, out float Height); T Load(int3 Location); T Load(int3 Location, int2 Offset); float4 Sample(sampler_state S, float2 Location); float4 Sample(sampler_state S, float2 Location, int2 Offset); float4 SampleBias(sampler_state S, float2 Location, float Bias); float4 SampleBias(sampler_state S, float2 Location, float Bias, int2 Offset); float SampleCmp(sampler_state S, float2 Location, float CompareValue); float SampleCmp(sampler_state S, float2 Location, float CompareValue, int2 Offset); float SampleCmpLevelZero(sampler_state S, float2 Location, float CompareValue); float SampleCmpLevelZero(sampler_state S, float2 Location, float CompareValue, int2 Offset); float4 SampleGrad(sampler_state S, float2 Location, float2 DDX, float2 DDY); float4 SampleGrad(sampler_state S, float2 Location, float2 DDX, float2 DDY, int2 Offset); float4 SampleLevel( sampler_state S, float2 Location, float LOD); float4 SampleLevel( sampler_state S, float2 Location, float LOD, int2 Offset); // SM 5.0 T Gather( in sampler s, in float2 location, in int2 offset ); T GatherRed( in sampler s, in float2 location ); T GatherGreen( in sampler s, in float2 location ); T GatherBlue( in sampler s, in float2 location ); T GatherRed( in sampler s, in float2 location, in int2 offset ); T GatherGreen( in sampler s, in float2 location, in int2 offset ); T GatherBlue( in sampler s, in float2 location, in int2 offset ); T GatherAlpha( in sampler s, in float2 location, in int2 offset ); T GatherRed( in sampler s, in float2 location, in int2 offset1, in int2 offset2, in int2 offset3, in int2 offset4 ); T GatherGreen( in sampler s, in float2 location, in int2 offset1, in int2 offset2, in int2 offset3, in int2 offset4 ); T GatherBlue( in sampler s, in float2 location, in int2 offset1, in int2 offset2, in int2 offset3, in int2 offset4 ); T GatherAlpha( in sampler s, in float2 location, in int2 offset1, in int2 offset2, in int2 offset3, in int2 offset4 ); float4 GatherCmp( in SamplerComparisonState s, in float2 location, in float compare_value, in int2 offset ); float4 GatherCmpRed( in SamplerComparisonState s, in float2 location, in float compare_value, in int2 offset ); float4 GatherCmpGreen( in SamplerComparisonState s, in float2 location, in float compare_value, in int2 offset ); float4 GatherCmpBlue( in SamplerComparisonState s, in float2 location, in float compare_value, in 
int2 offset ); float4 GatherCmpAlpha( in SamplerComparisonState s, in float2 location, in float compare_value, in int2 offset ); T mips.operator[][](in uint mipSlice, in uint2 pos); T operator[](in uint2 pos); }; class __Texture2DArray<T> { // SM 4.0 float CalculateLevelOfDetail( sampler_state s, float2 x); vector<__T_base,4> Gather( sampler_state S, float3 Location, int2 Offset ); void GetDimensions( uint MipLevel, out uint Width, out uint Height, out uint Elements, out uint NumberOfLevels); void GetDimensions( out uint Width, out uint Height, out uint Elements); void GetDimensions( uint MipLevel, out float Width, out float Height, out float Elements, out float NumberOfLevels); void GetDimensions( out float Width, out float Height, out float Elements); T Load(int4 Location); T Load(int4 Location, int2 Offset); T Load(int4 Location, int3 Offset); float4 Sample(sampler_state S, float3 Location); float4 Sample(sampler_state S, float3 Location, int2 Offset); float4 SampleBias(sampler_state S, float3 Location, float Bias); float4 SampleBias(sampler_state S, float3 Location, float Bias, int2 Offset); float SampleCmp(sampler_state S, float3 Location, float CompareValue); float SampleCmp(sampler_state S, float3 Location, float CompareValue, int2 Offset); float SampleCmpLevelZero(sampler_state S, float3 Location, float CompareValue); float SampleCmpLevelZero(sampler_state S, float3 Location, float CompareValue, int2 Offset); float4 SampleGrad(sampler_state S, float3 Location, float2 DDX, float2 DDY); float4 SampleGrad(sampler_state S, float3 Location, float2 DDX, float2 DDY, int2 Offset); float4 SampleLevel( sampler_state S, float3 Location, float LOD); float4 SampleLevel( sampler_state S, float3 Location, float LOD, int2 Offset); // SM 5.0 T Gather( in sampler s, in float3 location, in int2 offset ); T GatherRed( in sampler s, in float3 location, in int2 offset ); T GatherGreen( in sampler s, in float3 location, in int2 offset ); T GatherBlue( in sampler s, in float3 location, in int2 offset ); T GatherAlpha( in sampler s, in float3 location, in int2 offset ); float4 GatherCmp( in SamplerComparisonState s, in float3 location, in float compare_value, in int2 offset ); float4 GatherCmpRed( in SamplerComparisonState s, in float3 location, in float compare_value, in int2 offset ); float4 GatherCmpGreen( in SamplerComparisonState s, in float3 location, in float compare_value, in int2 offset ); float4 GatherCmpBlue( in SamplerComparisonState s, in float3 location, in float compare_value, in int2 offset ); float4 GatherCmpAlpha( in SamplerComparisonState s, in float3 location, in float compare_value, in int2 offset ); T mips.operator[][](in uint mipSlice, in uint3 pos); T operator[](in uint3 pos); }; class __Texture3D<T> { // SM 4.0 float CalculateLevelOfDetail( sampler_state s, float3 x); void GetDimensions( uint MipLevel, out uint Width, out uint Height, out uint Depth, out uint NumberOfLevels); void GetDimensions( out uint Width, out uint Height, out uint Depth); void GetDimensions( uint MipLevel, out float Width, out float Height, out float Depth, out float NumberOfLevels); void GetDimensions( out float Width, out float Height, out float Depth); T Load(int4 Location); T Load(int4 Location, int3 Offset); float4 Sample(sampler_state S, float3 Location); float4 Sample(sampler_state S, float3 Location, int3 Offset); float4 SampleBias(sampler_state S, float3 Location, float Bias); float4 SampleBias(sampler_state S, float3 Location, float Bias, int3 Offset); float SampleCmp(sampler_state S, float3 
Location, float CompareValue); float SampleCmp(sampler_state S, float3 Location, float CompareValue, int3 Offset); float4 SampleGrad(sampler_state S, float3 Location, float3 DDX, float3 DDY); float4 SampleGrad(sampler_state S, float3 Location, float3 DDX, float3 DDY, int3 Offset); float4 SampleLevel( sampler_state S, float3 Location, float LOD); float4 SampleLevel( sampler_state S, float3 Location, float LOD, int3 Offset); // SM 5.0 T mips.operator[][](in uint mipSlice,in uint3 pos); T operator[](in uint3 pos); }; class __TextureCube<T> { // SM 4.0 float CalculateLevelOfDetail( sampler_state s, float3 x); vector<__T_base,4> Gather( sampler_state S, float3 Location); void GetDimensions( uint MipLevel, out uint Width, out uint Height, out uint NumberOfLevels); void GetDimensions( out uint Width, out uint Height); void GetDimensions( uint MipLevel, out float Width, out float Height, out uint NumberOfLevels); void GetDimensions( out float Width, out float Height); float4 Sample(sampler_state S, float3 Location); float4 SampleBias(sampler_state S, float3 Location, float Bias); float SampleCmp(sampler_state S, float3 Location, float CompareValue); float SampleCmpLevelZero(sampler_state S, float3 Location, float CompareValue); float4 SampleGrad(sampler_state S, float3 Location, float3 DDX, float3 DDY); float4 SampleLevel( sampler_state S, float3 Location, float LOD); }; class __TextureCubeArray<T> { // SM 4.0 float CalculateLevelOfDetail( sampler_state s, float3 x); vector<__T_base,4> Gather( sampler_state S, float4 Location); void GetDimensions( uint MipLevel, out uint Width, out uint Height, out uint Elements, out uint NumberOfLevels); void GetDimensions( out uint Width, out uint Height, out uint Elements); void GetDimensions( uint MipLevel, out float Width, out float Height, out float Elements, out float NumberOfLevels); void GetDimensions( out float Width, out float Height, out float Elements); float4 Sample(sampler_state S, float4 Location); float4 SampleBias(sampler_state S, float4 Location, float Bias); float SampleCmp(sampler_state S, float4 Location, float CompareValue); float SampleCmpLevelZero(sampler_state S, float4 Location, float CompareValue); float4 SampleGrad(sampler_state S, float4 Location, float3 DDX, float3 DDY); float4 SampleLevel( sampler_state S, float4 Location, float LOD); }; class __Texture2DMS<T> { // SM 4.0 void GetDimensions( out uint Width, out uint Height, out uint Samples); void GetDimensions( out float Width, out float Height, out float Samples); float2 GetSamplePosition(int s); T Load(int2 Location); T Load(int2 Location, int2 Offset); T Load(int2 Location, int2 Offset, int SampleIndex); // SM 5.0 float2 GetSamplePosition( in int sampleindex ); T Load( in int2 coord, in int sampleindex ); T sample.operator[][]( in uint sampleSlice, in uint3 pos); }; class __Texture2DMSArray<T> { // SM 4.0 void GetDimensions( out uint Width, out uint Height, out uint Elements, out uint Samples); void GetDimensions( out float Width, out float Height, out float Elements, out float Samples); float2 GetSamplePosition(int s); T Load(int3 Location); T Load(int3 Location, int2 Offset); T Load(int3 Location, int2 Offset, int SampleIndex); // SM 5.0 float2 GetSamplePosition( in int sampleindex ); T Load( in int3 coord, in int sampleindex ); T sample.operator[][]( in uint sampleSlice, in uint3 pos); }; class __Buffer<T> { // SM 4.0 T Load(int Location); void GetDimensions(out uint dim); T operator[](in uint pos); }; // Stream-Output Object (DirectX HLSL) // 
http://msdn.microsoft.com/en-us/library/bb509661%28v=VS.85%29.aspx // StreamOutputObject <T> Name // StreamOutputObject: PointStream, LineStream, TriangleStream class __PointStream<T> { void Append(T StreamDataType); void RestartStrip(); }; class __LineStream<T> { void Append(T StreamDataType); void RestartStrip(); }; class __TriangleStream<T> { void Append(T StreamDataType); void RestartStrip(); }; // --------------------------------------------------------------------------------------- // Shader Model 5.0 // --------------------------------------------------------------------------------------- // AppendStructuredBuffer<T> // http://msdn.microsoft.com/en-us/library/ff471448%28v=VS.85%29.aspx class __AppendStructuredBuffer<T> { void Append(T value); void GetDimensions(out uint numStructs, out uint stride); }; // ByteAddressBuffer // http://msdn.microsoft.com/en-us/library/ff471453%28v=VS.85%29.aspx class __ByteAddressBuffer { void GetDimensions(out uint dim); uint Load(in uint address); uint2 Load2(in uint address); uint3 Load3(in uint address); uint4 Load4(in uint address); }; // ConsumeStructuredBuffer<T> // http://msdn.microsoft.com/en-us/library/ff471459%28v=VS.85%29.aspx class __ConsumeStructuredBuffer<T> { T Consume(void); void GetDimensions(out uint numStructs, out uint stride); }; // InputPatch<T,N> // http://msdn.microsoft.com/en-us/library/ff471462%28v=VS.85%29.aspx class __InputPatch<T,N> { uint Length; T operator[](in uint n); }; // OutputPatch<T,N> // http://msdn.microsoft.com/en-us/library/ff471464%28v=VS.85%29.aspx class __OutputPatch<T,N> { uint Length; T operator[](in uint n); }; // RWBuffer<T> // http://msdn.microsoft.com/en-us/library/ff471472%28v=VS.85%29.aspx class __RWBuffer<T> { void GetDimensions(out uint dim); T operator[](in uint pos); }; // RWByteAddressBuffer // http://msdn.microsoft.com/en-us/library/ff471475%28v=VS.85%29.aspx class __RWByteAddressBuffer { void GetDimensions(out uint dim); void InterlockedAdd(in uint dest, in uint value, out uint original_value); void InterlockedAnd( in uint dest, in uint value, out uint original_value ); void InterlockedCompareExchange( in uint dest, in uint compare_value, in uint value, out uint original_value ); void InterlockedCompareStore( in uint dest, in uint compare_value, in uint value ); void InterlockedExchange( in uint dest, in uint value, out uint original_value ); void InterlockedMax( in uint dest, in uint value, out uint original_value ); void InterlockedMin( in uint dest, in uint value, out uint original_value ); void InterlockedOr( in uint dest, in uint value, out uint original_value ); void InterlockedXor( in uint dest, in uint value, out uint original_value ); uint Load( in uint address ); uint2 Load2( in uint address ); uint3 Load3( in uint address ); uint4 Load4( in uint address ); void Store( in uint address, in uint value ); void Store2( in uint address, in uint2 values ); void Store3( in uint address, in uint3 values ); void Store4( in uint address, in uint4 values ); }; // RWStructuredBuffer<T> // http://msdn.microsoft.com/en-us/library/ff471494%28v=VS.85%29.aspx class __RWStructuredBuffer<T> { uint DecrementCounter(void); void GetDimensions( out uint numStructs, out uint stride ); uint IncrementCounter(void); T operator[](in uint pos); }; // RWTexture1D<T> // http://msdn.microsoft.com/en-us/library/ff471499%28v=VS.85%29.aspx class __RWTexture1D<T> { void GetDimensions( out uint Width ); T operator[](in uint pos); }; // RWTexture1DArray<T> // 
http://msdn.microsoft.com/en-us/library/ff471500%28v=VS.85%29.aspx class __RWTexture1DArray<T> { void GetDimensions( out uint Width, out uint Elements ); T operator[](in uint2 pos); }; // RWTexture2D<T> // http://msdn.microsoft.com/en-us/library/ff471505%28v=VS.85%29.aspx class __RWTexture2D<T> { void GetDimensions( out uint Width, out uint Height ); T operator[](in uint2 pos); }; // RWTexture2DArray<T> // http://msdn.microsoft.com/en-us/library/ff471506%28v=VS.85%29.aspx class __RWTexture2DArray<T> { void GetDimensions( out uint Width, out uint Height, out uint Elements ); T operator[](in uint3 pos); }; // RWTexture3D<T> // http://msdn.microsoft.com/en-us/library/ff471511%28v=VS.85%29.aspx class __RWTexture3D<T> { void GetDimensions( out uint Width, out uint Height, out uint Depth ); T operator[](in uint3 pos); }; // StructuredBuffer<T> // http://msdn.microsoft.com/en-us/library/ff471514%28v=VS.85%29.aspx class __StructuredBuffer<T> { void GetDimensions( out uint numStructs, out uint stride ); T operator[](in uint pos); };
/** * @author Steve Ebersole */ @Entity @Table(name = "vgras007_v031") public class DoesNotWork implements Serializable { private static final long serialVersionUID = 1L; @EmbeddedId private DoesNotWorkPk doesNotWorkPk; @Column(name = "production_credits_tid", insertable = false, updatable = false) private Long globAdditInfoTid; @ElementCollection @CollectionTable( name = "vgras029_v031", joinColumns = @JoinColumn(name = "text_id", referencedColumnName = "production_credits_tid") ) @Column(name = "text_part", insertable = false, updatable = false) @IndexColumn(name = "seq_no", base = 1) private List<String> globalNotes = new ArrayList<String>(); public DoesNotWork() { } public DoesNotWork(DoesNotWorkPk doesNotWorkPk) { this.doesNotWorkPk = doesNotWorkPk; } public DoesNotWorkPk getDoesNotWorkPk() { return doesNotWorkPk; } public void setDoesNotWorkPk(DoesNotWorkPk doesNotWorkPk) { this.doesNotWorkPk = doesNotWorkPk; } public List<String> getGlobalNotes() { return globalNotes; } public void setGlobalNotes(List<String> globalNotes) { this.globalNotes = globalNotes; } public Long getGlobAdditInfoTid() { return globAdditInfoTid; } public void setGlobAdditInfoTid(Long globAdditInfoTid) { this.globAdditInfoTid = globAdditInfoTid; } @Override public int hashCode() { final int prime = 31; int result = 1; result = prime * result + ((doesNotWorkPk == null) ? 0 : doesNotWorkPk.hashCode()); return result; } @Override public boolean equals(Object obj) { if ( this == obj ) { return true; } if ( obj == null ) { return false; } if ( !(obj instanceof DoesNotWork) ) { return false; } DoesNotWork other = (DoesNotWork) obj; if ( doesNotWorkPk == null ) { if ( other.doesNotWorkPk != null ) { return false; } } else if ( !doesNotWorkPk.equals( other.doesNotWorkPk ) ) { return false; } return true; } }
/**
 * Increments the dictionary count for a term; the lookup reuses the CharsAtt
 * key so that no new object is created on the hot path.
 */
private void inc(final CharsAtt chars, final int flag)
{
  @SuppressWarnings("unlikely-arg-type")
  Entry entry = dic.get(chars);
  if (entry == null) {
    String lem = chars.toString();
    dic.put(lem, new Entry(lem, flag));
  }
  else {
    entry.count++;
  }
}
import java.util.Scanner;

public class Main {
	public static void main(String args[]) {
		Scanner scan = new Scanner(System.in);
		int n = scan.nextInt();
		int k = scan.nextInt();
		String S = scan.next();
		// head[i] holds the start index of the i-th maximal run of equal
		// characters; head[count] is set to n as a sentinel.
		int head[] = new int[n + 1];
		int count = 1;
		for (int i = 1; i < n; i++) {
			if (S.charAt(i - 1) != S.charAt(i)) {
				head[count++] = i;
			}
		}
		head[count] = n;
		// Flipping at most k blocks of '0' merges up to 2*k runs (plus one
		// more when the window starts on a '1' run) into a single segment.
		int max = 1;
		for (int i = 0; i < count; i++) {
			boolean zero = S.charAt(head[i]) == '0';
			int last = Math.min(i + k * 2 + (zero ? 0 : 1), count);
			int sum = head[last] - head[i];
			max = Math.max(max, sum);
		}
		System.out.println(max);
	}
}
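An equivalent sketch of the run-compression idea above, rewritten in Python for clarity (editorial illustration; the function and variable names are mine):

def longest_after_k_flips(s: str, k: int) -> int:
    # head[i] is the start of the i-th maximal run; head[count] == len(s).
    head = [0]
    for i in range(1, len(s)):
        if s[i] != s[i - 1]:
            head.append(i)
    head.append(len(s))
    count = len(head) - 1
    best = 1
    for i in range(count):
        zero = s[head[i]] == '0'
        last = min(i + 2 * k + (0 if zero else 1), count)
        best = max(best, head[last] - head[i])
    return best

# Flipping the single block "000" yields "11110", a run of length 4.
assert longest_after_k_flips("00010", 1) == 4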
/**
 * Seats the customer at a specific table.
 *
 * @param customer customer that needs to be seated
 */
private void seatCustomer(MyCustomer customer) {
	Do("Seating " + customer.cmr + " at table " + (customer.tableNum + 1));
	Customer guiCustomer = customer.cmr.getGuiCustomer();
	guiMoveFromCurrentPostionTo(new Position(guiCustomer.getX() + 1, guiCustomer.getY()));
	waiter.pickUpCustomer(guiCustomer);
	Position tablePos = new Position(tables[customer.tableNum].getX() - 1, tables[customer.tableNum].getY() + 1);
	guiMoveFromCurrentPostionTo(tablePos);
	waiter.seatCustomer(tables[customer.tableNum]);
	customer.state = CustomerState.NO_ACTION;
	customer.cmr.msgFollowMeToTable(this, new Menu());
	stateChanged();
}
Fractionation of the more active extracts of Geranium molle L.: a relationship between their phenolic profile and biological activity. Geranium molle L., commonly known as Dove's-foot Crane's-bill or Dovesfoot Geranium, is an herbaceous plant belonging to the Geraniaceae family. Contrary to many other Geranium species, the bioactivity and the phytochemical composition of G. molle seem not to have attracted attention until a recent study from our group regarding the bioactivity of several aqueous and organic extracts of the plant. In particular, we assessed the cytotoxic activity of these extracts against several human tumor cell lines (breast, lung, cervical and hepatocellular carcinomas) and a non-tumor porcine liver primary cell line, inspired by an ethnopharmacological report describing the traditional use of this medicinal plant in some regions of Northeast Portugal for the treatment of cancer. Following this preliminary evaluation, the most active extracts (acetone and methanol) were fractionated by column chromatography and the resulting fractions were evaluated for their antioxidant activity and cytotoxicity against the same cell lines. The bio-guided fractionation of the extracts resulted in several fractions exhibiting improved bioactivity in comparison with the corresponding crude extracts. The fractions obtained from the acetone extract consistently displayed the lowest EC50 and GI50 values and presented the highest content of total phenolic compounds. The phytochemical composition of the most bioactive fractions of the acetone and methanol extracts was also determined and about thirty compounds, mainly flavonoids and phenolic acids, could be identified for the first time in G. molle.
'use strict';

import { identity } from '../util/identity';

export function min(source: Iterable<number>, fn?: (x: number) => number): number;
export function min<T>(source: Iterable<T>, fn: (x: T) => number): number;
export function min(source: Iterable<any>, fn: (x: any) => number = identity): number {
  let atleastOnce = false;
  let value = Infinity;
  for (let item of source) {
    if (!atleastOnce) {
      atleastOnce = true;
    }
    let x = fn(item);
    if (x < value) {
      value = x;
    }
  }
  if (!atleastOnce) {
    throw new Error('Sequence contains no elements');
  }
  return value;
}
t = int(input()) L=[] for i in range(t): n=int(input()) s=input() L.append((n,s)) for x in L: if x[0]<11: print('NO') else: y=x[1].find('8') if y==-1: print('NO') elif y>x[0]-11: print('NO') else: print('YES')
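The check above decides whether an 11-digit telephone number beginning with '8' can be obtained by deleting digits; restated as a function (an editorial sketch, names are mine):

def can_make_phone_number(s: str) -> bool:
    n = len(s)
    if n < 11:
        return False
    first_eight = s.find('8')
    # Some '8' must exist with at least 10 digits remaining after it.
    return first_eight != -1 and first_eight <= n - 11

assert can_make_phone_number("87178212828") is True
assert can_make_phone_number("8") is False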
To determine whether long-term oral administration of UFT, a combination of 5-fluorouracil and uracil, in addition to conventional estrogen therapy improves the response and survival of patients with advanced stage D2 prostate adenocarcinoma, a randomized prospective study was performed with either estrogen alone (Honvan 200 mg/day or presexol 1 mg/day: group A) or estrogen plus UFT (400 mg/day: group B). The study comprised 34 newly diagnosed patients with poorly differentiated prostatic adenocarcinoma (18 patients in group A and 16 in group B). Survival from all causes of death and cancer-specific survival were compared using Kaplan-Meier actuarial methods among patients stratified by the histological composition of their tumors (analyzed by WHO histologic patterns), by extent of disease (EOD) score, and by whether serum PSA or PAP levels normalized after treatment. Although the effect of combination therapy with UFT on overall survival did not reach statistical significance, survival was better in this group than with estrogen alone among patients whose tumors contained more than 70% medullary and/or column-cord histological components. Survival among patients with an EOD score of 3, and among those whose serum PSA or PAP levels did not decrease to within normal limits after treatment, was also better in group B than in group A. These findings suggest the validity of combination therapy with UFT in addition to estrogen for highly advanced prostatic cancer patients whose tumors are composed of abundant non-hormone-refractory histological components.
// Eon/Source/Math/Mat4.cpp
#include "Common.h"
#include "Math/Mat4.h"

namespace eon {
namespace math {
Mat4::Mat4(float diagonal) {
  for (int i = 0; i < 16; i++) {
    elements[i] = 0;
  }
  Set(0, 0, diagonal);
  Set(1, 1, diagonal);
  Set(2, 2, diagonal);
  Set(3, 3, diagonal);
}

Mat4 Mat4::operator*(Mat4 &other) {
  Mat4 ret(0.0f);
  for (int i = 0; i < 4; i++) {
    for (int j = 0; j < 4; j++) {
      float sum = 0;
      for (int k = 0; k < 4; k++) {
        sum += Get(j, k) * other.Get(k, i);
      }
      ret.Set(j, i, sum);
    }
  }
  return ret;
}

Vec4 Mat4::operator*(Vec4 &other) {
  Vec4 ret(0.0f, 0.0f, 0.0f, 0.0f);
  for (int i = 0; i < 4; i++) {
    float sum = 0;
    sum += Get(i, 0) * other.x;
    sum += Get(i, 1) * other.y;
    sum += Get(i, 2) * other.z;
    sum += Get(i, 3) * other.w;
    // Each case must break so one row's sum does not overwrite the
    // remaining components.
    switch (i) {
    case 0:
      ret.x = sum;
      break;
    case 1:
      ret.y = sum;
      break;
    case 2:
      ret.z = sum;
      break;
    case 3:
      ret.w = sum;
      break;
    }
  }
  return ret;
}

void Mat4::operator*=(Mat4 &other) { *this = *this * other; }

Mat4 Mat4::Translate(Vec3 delta) {
  Mat4 ret(1.0f);
  ret.Set(0, 3, delta.x);
  ret.Set(1, 3, delta.y);
  ret.Set(2, 3, delta.z);
  return ret;
}

Mat4 Mat4::RotateX(float radians) {
  Mat4 ret(1.0f);
  ret.Set(1, 1, cos(radians));
  ret.Set(1, 2, -sin(radians));
  ret.Set(2, 1, sin(radians));
  ret.Set(2, 2, cos(radians));
  return ret;
}

Mat4 Mat4::RotateY(float radians) {
  Mat4 ret(1.0f);
  ret.Set(0, 0, cos(radians));
  ret.Set(0, 2, sin(radians));
  ret.Set(2, 0, -sin(radians));
  ret.Set(2, 2, cos(radians));
  return ret;
}

Mat4 Mat4::RotateZ(float radians) {
  Mat4 ret(1.0f);
  ret.Set(0, 0, cos(radians));
  ret.Set(0, 1, -sin(radians));
  ret.Set(1, 0, sin(radians));
  ret.Set(1, 1, cos(radians));
  return ret;
}

Mat4 Mat4::LookAt(Vec3 right, Vec3 up, Vec3 dir, Vec3 pos) {
  Mat4 mat1(1.0f);
  Mat4 mat2(1.0f);
  mat1.Set(0, 0, right.x);
  mat1.Set(0, 1, right.y);
  mat1.Set(0, 2, right.z);
  mat1.Set(1, 0, up.x);
  mat1.Set(1, 1, up.y);
  mat1.Set(1, 2, up.z);
  mat1.Set(2, 0, dir.x);
  mat1.Set(2, 1, dir.y);
  mat1.Set(2, 2, dir.z);
  mat2.Set(0, 3, -pos.x);
  mat2.Set(1, 3, -pos.y);
  mat2.Set(2, 3, -pos.z);
  return mat1 * mat2;
}

Mat4 Mat4::Ortho(float left, float right, float bottom, float top, float near,
                 float far) {
  Mat4 ret(1.0f);
  ret.Set(0, 0, 2 / (right - left));
  ret.Set(1, 1, 2 / (top - bottom));
  // Depth scale goes on the z axis at (2, 2).
  ret.Set(2, 2, -2 / (far - near));
  ret.Set(0, 3, -((right + left) / (right - left)));
  ret.Set(1, 3, -((top + bottom) / (top - bottom)));
  ret.Set(2, 3, -((far + near) / (far - near)));
  return ret;
}

Mat4 Mat4::Persp(float fov, float aspectRatio, float near, float far) {
  Mat4 ret(0.0f);
  float range = near - far;
  float tanHalfFov = tan(fov / 2);
  ret.Set(0, 0, 1 / (tanHalfFov * aspectRatio));
  ret.Set(1, 1, 1 / tanHalfFov);
  ret.Set(2, 2, -far / range);
  ret.Set(2, 3, (far * near) / range);
  ret.Set(3, 2, 1);
  return ret;
}

void Mat4::Print() {
  for (int i = 0; i < 4; i++) {
    for (int j = 0; j < 4; j++) {
      std::cout << "| " << Get(i, j) << " ";
    }
    std::cout << std::endl;
  }
}
}
}
/* Virtual Translation Tables Dump * * Input Args: * vm - Virtual Machine * indent - Left margin indent amount * * Output Args: * stream - Output FILE stream * * Return: None * * Dumps to the FILE stream given by stream, the contents of all the * virtual translation tables for the VM given by vm. */ void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) { struct pageMapL4Entry *pml4e, *pml4e_start; struct pageDirectoryPointerEntry *pdpe, *pdpe_start; struct pageDirectoryEntry *pde, *pde_start; struct pageTableEntry *pte, *pte_start; if (!vm->pgd_created) return; fprintf(stream, "%*s " " no\n", indent, ""); fprintf(stream, "%*s index hvaddr gpaddr " "addr w exec dirty\n", indent, ""); pml4e_start = (struct pageMapL4Entry *) addr_gpa2hva(vm, vm->pgd); for (uint16_t n1 = 0; n1 <= 0x1ffu; n1++) { pml4e = &pml4e_start[n1]; if (!pml4e->present) continue; fprintf(stream, "%*spml4e 0x%-3zx %p 0x%-12lx 0x%-10lx %u " " %u\n", indent, "", pml4e - pml4e_start, pml4e, addr_hva2gpa(vm, pml4e), (uint64_t) pml4e->address, pml4e->writable, pml4e->execute_disable); pdpe_start = addr_gpa2hva(vm, pml4e->address * vm->page_size); for (uint16_t n2 = 0; n2 <= 0x1ffu; n2++) { pdpe = &pdpe_start[n2]; if (!pdpe->present) continue; fprintf(stream, "%*spdpe 0x%-3zx %p 0x%-12lx 0x%-10lx " "%u %u\n", indent, "", pdpe - pdpe_start, pdpe, addr_hva2gpa(vm, pdpe), (uint64_t) pdpe->address, pdpe->writable, pdpe->execute_disable); pde_start = addr_gpa2hva(vm, pdpe->address * vm->page_size); for (uint16_t n3 = 0; n3 <= 0x1ffu; n3++) { pde = &pde_start[n3]; if (!pde->present) continue; fprintf(stream, "%*spde 0x%-3zx %p " "0x%-12lx 0x%-10lx %u %u\n", indent, "", pde - pde_start, pde, addr_hva2gpa(vm, pde), (uint64_t) pde->address, pde->writable, pde->execute_disable); pte_start = addr_gpa2hva(vm, pde->address * vm->page_size); for (uint16_t n4 = 0; n4 <= 0x1ffu; n4++) { pte = &pte_start[n4]; if (!pte->present) continue; fprintf(stream, "%*spte 0x%-3zx %p " "0x%-12lx 0x%-10lx %u %u " " %u 0x%-10lx\n", indent, "", pte - pte_start, pte, addr_hva2gpa(vm, pte), (uint64_t) pte->address, pte->writable, pte->execute_disable, pte->dirty, ((uint64_t) n1 << 27) | ((uint64_t) n2 << 18) | ((uint64_t) n3 << 9) | ((uint64_t) n4)); } } } } }
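The innermost fprintf above reconstructs the guest-virtual page number from the four 9-bit page-table indices; a small editorial sketch of that packing (Python; assumes standard x86-64 4-level paging with 4 KiB pages):

def vaddr_from_indices(n1, n2, n3, n4, page_size=4096):
    # Four 9-bit indices, packed exactly as in the fprintf above.
    assert all(0 <= n <= 0x1ff for n in (n1, n2, n3, n4))
    page_number = (n1 << 27) | (n2 << 18) | (n3 << 9) | n4
    return page_number * page_size

# PML4 index 1 with all lower indices zero lands on the 512 GiB boundary.
assert vaddr_from_indices(1, 0, 0, 0) == 1 << 39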
package net.jitse.npclib.listeners;

import org.bukkit.Bukkit;
import org.bukkit.entity.Player;

import net.jitse.npclib.internal.NPCBase;
import net.jitse.npclib.internal.NPCManager;

public class HandleMoveBase {

    void handleMove(Player player) {
        for (NPCBase npc : NPCManager.getAllNPCs()) {
            // I don't quite understand what this statement was intentionally
            // supposed to do. At the moment it at least prevents NPCs from
            // becoming visible again once they have been un-shown, so I have
            // commented it out.
            // xx Onno
            /*
            if (!npc.getShown().contains(player.getUniqueId())) {
                continue; // NPC was never supposed to be shown to the player.
            }
            */
            if (!npc.isShown(player) && npc.inRangeOf(player) && npc.inViewOf(player)) {
                // The player is in range and can see the NPC, auto-show it.
                npc.show(player, true);
            } else if (npc.isShown(player) && !npc.inRangeOf(player)) {
                // The player is not in range of the NPC anymore, auto-hide it.
                npc.hide(player, true);
            }
        }
    }
}
import * as http from 'http'; import * as net from 'net'; import * as WebSocket from 'ws'; import { ServerConfig } from '@marblejs/core'; import { WebSocketClientConnection, WebSocketServerConnection, WebSocketServerConfig } from '../server/websocket.server.interface'; import { createWebSocketServer } from '../server/websocket.server'; import { WsServerEffect } from '../effects/websocket.effects.interface'; export const bootstrapHttpServer = async (host?: string, port?: number): Promise<http.Server> => new Promise(resolve => { const httpServer = http.createServer(); httpServer.listen(port, host, () => resolve(httpServer)); }); export const bootstrapWebSocketClient = (httpServer: http.Server): Promise<WebSocket> => { const serverAddressInfo = httpServer.address() as net.AddressInfo; const host = serverAddressInfo.address === '::' ? '127.0.0.1' : serverAddressInfo.address; const port = serverAddressInfo.port; const client = new WebSocket(`ws://${host}:${port}`); return new Promise(resolve => client.once('open', () => resolve(client))); }; export const bootstrapWebSocketServer = async ( options: WebSocketServerConfig['options'], listener: ServerConfig<any, any>['listener'], event$?: WsServerEffect, dependencies?: WebSocketServerConfig['dependencies'], ) => (await createWebSocketServer({ options, listener, event$, dependencies }))(); export const createWebSocketClientMock = (): WebSocketClientConnection => { class WebSocketClientMock extends WebSocket.EventEmitter { isAlive = false; ping = jest.fn(); close = jest.fn(); terminate = jest.fn(); } return new WebSocketClientMock() as any; }; export const createWebSocketServerMock = (clients: ReturnType<typeof createWebSocketClientMock>[]): WebSocketServerConnection => { class WebSocketServerMock extends WebSocket.EventEmitter { constructor(public clients: ReturnType<typeof createWebSocketClientMock>[]) { super(); } } return new WebSocketServerMock(clients) as any; };
package com.housaiying.qingting.listen.fragment;

import android.view.View;

import androidx.annotation.NonNull;
import androidx.lifecycle.ViewModelProvider;
import androidx.recyclerview.widget.LinearLayoutManager;

import com.alibaba.android.arouter.facade.annotation.Route;
import com.chad.library.adapter.base.BaseQuickAdapter;
import com.housaiying.qingting.common.Constants;
import com.housaiying.qingting.common.bean.FavoriteBean;
import com.housaiying.qingting.common.databinding.CommonLayoutListBinding;
import com.housaiying.qingting.common.mvvm.view.BaseRefreshMvvmFragment;
import com.housaiying.qingting.listen.R;
import com.housaiying.qingting.listen.adapter.FavoriteAdapter;
import com.housaiying.qingting.listen.mvvm.ViewModelFactory;
import com.housaiying.qingting.listen.mvvm.viewmodel.FavoriteViewModel;
import com.ximalaya.ting.android.opensdk.model.track.Track;

/**
 * Author: housaiying
 * <br/>Date: 2020/3/16 8:45
 * <br/>Email: <EMAIL>
 * <br/>Description: My favorites (liked tracks)
 */
@Route(path = Constants.Router.Listen.F_FAVORITE)
public class FavoriteFragment extends BaseRefreshMvvmFragment<CommonLayoutListBinding, FavoriteViewModel, FavoriteBean> implements BaseQuickAdapter.OnItemChildClickListener, BaseQuickAdapter.OnItemClickListener {

    private FavoriteAdapter mFavoriteAdapter;

    @Override
    protected int onBindLayout() {
        return R.layout.common_layout_list;
    }

    @Override
    protected void initView() {
        mBinding.recyclerview.setLayoutManager(new LinearLayoutManager(mActivity));
        mBinding.recyclerview.setHasFixedSize(true);
        mFavoriteAdapter = new FavoriteAdapter(R.layout.listen_item_favorite);
        mFavoriteAdapter.bindToRecyclerView(mBinding.recyclerview);
    }

    @Override
    public void initListener() {
        super.initListener();
        mFavoriteAdapter.setOnItemChildClickListener(this);
        mFavoriteAdapter.setOnItemClickListener(this);
    }

    @NonNull
    @Override
    protected WrapRefresh onBindWrapRefresh() {
        return new WrapRefresh(mBinding.refreshLayout, mFavoriteAdapter);
    }

    @Override
    public void initData() {
        mViewModel.init();
    }

    @Override
    public void initViewObservable() {
        mViewModel.getInitFavoritesEvent().observe(this, favoriteBeans -> mFavoriteAdapter.setNewData(favoriteBeans));
    }

    @Override
    public String[] onBindBarTitleText() {
        return new String[]{"我喜欢的声音"}; // UI string: "Sounds I like"
    }

    @Override
    public void onItemChildClick(BaseQuickAdapter adapter, View view, int position) {
        mViewModel.unlike(mFavoriteAdapter.getItem(position).getTrack());
        mFavoriteAdapter.remove(position);
        if (mFavoriteAdapter.getData().size() == 0) {
            showEmptyView();
        }
    }

    @Override
    public void onItemClick(BaseQuickAdapter adapter, View view, int position) {
        Track track = mFavoriteAdapter.getItem(position).getTrack();
        mViewModel.play(track.getAlbum().getAlbumId(), track.getDataId());
    }

    @Override
    public Class<FavoriteViewModel> onBindViewModel() {
        return FavoriteViewModel.class;
    }

    @Override
    public ViewModelProvider.Factory onBindViewModelFactory() {
        return ViewModelFactory.getInstance(mApplication);
    }
}
#include <stdio.h>

/* Sums every i in [1, n] whose decimal digit sum lies in [a, b]. */
int main()
{
    int n, a, b, i;
    int x = 0; /* running answer */

    scanf("%d %d %d", &n, &a, &b);
    for (i = 1; i <= n; i++) {
        int p = i;
        int y = 0; /* digit sum of i */
        while (p > 0) {
            y += p % 10;
            p /= 10;
        }
        if (y >= a && y <= b) {
            x += i;
        }
    }
    printf("%d\n", x);
    return 0;
}
package alteraa;

// Arc Package
import arc.*;
import arc.util.*;
// Mindustry Package
import mindustry.*;
import mindustry.content.*;
import mindustry.game.EventType.*;
import mindustry.gen.*;
import mindustry.mod.*;
import mindustry.ui.dialogs.*;
// Alteraa Package
import alteraa.content.*;
// End

public class AlteraaSector extends Mod{

    public AlteraaSector(){}

    @Override
    public void loadContent(){
        new AlteraaItems().load();
        new AlteraaLiquids().load();
        new AlteraaBullets().load();
        new AlteraaUnits().load();
        new AlteraaBlocks().load();
        new AlteraaPlanets().load();
        new AlteraaSectors().load();
        new AlteraaTechTree().load();
    }
}
/**
 * Handles the given animation event that occurred in the minigame.
 *
 * @param event The animation event.
 */
public static void handleAnimationEvent(PlayerAnimationEvent event) {
    if (minigame != null) {
        minigame.handleAnimationEvent(event);
    }
}
import { ExtensionContext } from 'vscode';
import { activate } from './extension';

describe('Extension', () => {
  test('Activate', () => {
    const context: ExtensionContext = {
      subscriptions: [],
    } as any;

    activate(context);

    expect(context.subscriptions.length).toBeGreaterThan(0);
  });
});
/*
Version of matrix computation that subtracts reconciliation event scores from co-event scores in the adjacency matrix.

Takes:
 - WDupCost (double): cost of a single duplication event, weighted so that this score can be added to an adjacency score.
 - WLossCost (double): cost of a single loss event, weighted so that this score can be added to an adjacency score.
 - WHgtCost (double): cost of a single horizontal gene transfer event, weighted so that this score can be added to an adjacency score.
*/
void AdjMatrix::computeMatrix(double WDupCost, double WLossCost, double WHgtCost)
{
    WeightedDupCost  = -WDupCost;
    WeightedLossCost = -WLossCost;
    WeightedHgtCost  = -WHgtCost;

    if (useBoltzmann) // convert the negated costs into Boltzmann factors
    {
        WeightedDupCost  = exp(-WeightedDupCost  / (BOLTZMANN_K * Temperature));
        WeightedLossCost = exp(-WeightedLossCost / (BOLTZMANN_K * Temperature));
        WeightedHgtCost  = exp(-WeightedHgtCost  / (BOLTZMANN_K * Temperature));
    }

    computeMatrix();
}
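For reference, the Boltzmann branch above can be written compactly. This is only a restatement of what the code does, with c standing for the raw event cost (WDupCost, WLossCost or WHgtCost), W for the stored weighted cost, and k_B and T for BOLTZMANN_K and Temperature:

W = -c, \qquad W \leftarrow e^{-W/(k_B T)} = e^{\,c/(k_B T)}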
Recurrent bilateral eyelid and conjunctival granulomatosis in Churg-Strauss syndrome. A 47-year-old woman with poorly controlled asthma and allergic rhinitis presented with recurrent episodes of bilateral upper eyelid swelling associated with a forniceal conjunctival mass over the past 10 years. Routine blood investigations showed raised IgE levels and raised eosinophil counts. The diagnosis of Churg-Strauss syndrome (CSS) was made following biopsy of the conjunctival mass. The symptoms responded well to oral steroid treatment but recurred following cessation of the therapy. The patient was co-managed with a rheumatologist; she currently remains stable on oral methotrexate and low-dose oral steroids. Ocular involvement in CSS is unusual, but this unique presentation of CSS was successfully managed, and the patient remains in remission.
Last week, Democrats on the House Science, Space and Technology Committee sent panel leadership a snarky letter positing that, if the committee was so interested in Hillary Clinton's email server scandal last year, it must also be interested in security shortcomings at the Trump White House. Monday, on a conference call previewing a Tuesday hearing on cybersecurity, a GOP committee aide responded in kind.

“From a certain standpoint, [the letter] can be seen as a positive sign because last Congress we undertook to look at the OPM cyber attack and breach, multiple breaches at the [Federal Deposit Insurance Corporation], attacks at the IRS and so on and so forth. And while we were looking at those things, the minority was telling us we were off on the wrong track, we were turning cybersecurity into politics and all the investigations into [Office of Personnel Management] and so forth were awful and illegitimate,” the aide said. “So any expression of support for the committee to discharge its authorities — and we do have them as far as [the Federal Information Security Management Act] and cybersecurity is concerned — is a good thing.”

Recent media reports claim President Trump continues to use his old, unsecured smartphone despite being issued a secure one, that Trump aides use private email accounts and that the president's Twitter account had not been properly secured. Democratic Reps. Eddie Bernice Johnson (Texas), Don Beyer (Va.), and Dan Lipinski (Ill.) penned the letter to suggest using the Tuesday hearing to discuss these issues.

“We are writing to inform the Committee of further opportunities to investigate Executive Branch cybersecurity issues that have been of intense interest to you in the past,” they wrote. A Democratic Science staffer said she anticipated the Democrats would ask questions along those lines Tuesday.

The hearing is set to discuss recommendations to improve cybersecurity from the Commission on Enhancing National Cybersecurity and the Center for Strategic and International Studies, published in December and January, respectively. The commission report was ordered by then-President Obama in April to provide some suggestions for the next administration. Aides for the Science Committee note that the committee will not be able to discuss an important third document — a hotly anticipated cybersecurity executive order that has been in the works.

Heading into Tuesday's hearing, Democratic staff pushed back on the characterization that the party was soft on cybersecurity issues, noting, for example, that Democrats took a hard line on the OPM and FDIC breaches.

"I understand you just arrived at FDIC in November and that the [Chief Information Officer's] office has suffered from a lack of consistent leadership for some time," said Beyer in a May hearing addressing a new CIO at the FDIC, an agency House Science alleges has been uncooperative with the committee. "You are now the fourth CIO the FDIC has had in the past four years. I hope that you will be able to bring some stability to that office. But equally important is establishing a solid foundation built on reliability and openness with Congress. I hope that you will strive to do that as well."
—Morgan Chalfant contributed. Updated Tuesday at 10:22 a.m.
#!/usr/bin/env python
from setuptools import setup

packages = ["cxroots", "cxroots.tests", "cxroots.contours"]

# get the version; this will assign __version__
with open("cxroots/version.py") as f:
    exec(f.read())  # nosec

# read README.rst for the long description
try:
    with open("README.rst") as file:
        long_description = file.read()
except IOError:
    long_description = None

setup(
    name="cxroots",
    version=__version__,  # noqa
    description="Find all the roots (zeros) of a complex analytic function within a "
    "given contour in the complex plane.",
    long_description=long_description,
    author="Robert Parini",
    author_email="[email protected]",
    url="https://rparini.github.io/cxroots/",
    license="BSD-3-Clause",
    data_files=[("", ["LICENSE"])],
    packages=packages,
    package_data={"cxroots": ["py.typed"]},
    zip_safe=False,  # prevent cxroots from installing as a .egg zip file
    platforms=["all"],
    python_requires=">=3.8",
    setup_requires=["pytest-runner"],
    install_requires=[
        "numpy",
        "scipy",
        "numpydoc",
        "mpmath",
        "numdifftools>=0.9.39",
        "rich",
    ],
    tests_require=["pytest"],
    extras_require={"plot": ["matplotlib"]},
    keywords="roots zeros complex analytic functions",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Science/Research",
        "Topic :: Scientific/Engineering :: Mathematics",
        "License :: OSI Approved :: BSD License",
        "Programming Language :: Python :: 3 :: Only",
    ],
)
#include "bank_account.h" #include "checking_account.h" #include"savings_account.h" #include"customer.h" #include<iostream> #include<vector> #include<string> #include<memory> using std::cout; using std::cin; using std::unique_ptr; using std::make_unique; int main() { /* //c++ 98 SavingsAccount*s = new SavingsAccount(500); //more lot of code delete s; s = nullptr;*/ //c++ 11 unique_ptr<BankAccount> s = make_unique<SavingsAccount> ( 90 ); unique_ptr<BankAccount> c = make_unique<CheckingAccount> ( 100 ); std::vector<unique_ptr<BankAccount>> accounts; accounts.push_back(std::move(s)); accounst.push_back(std::move(c)); for (auto &act : accounts) { cout << act->get_balance() << "\n"; } /* BankAccount* act=new CheckingAccount(100); BankAccount account(500); Customer cust; cust.add_account(account); /* do you want to play again loop Ttt game; loop for mark board game ends call manager save game */ cin >> account; cout << account; display_balance(account); /*std::vector<BankAccount> accounts{ BankAccount(100), BankAccount(200) }; for (auto act : accounts) { cout << act.get_balance() << "\n"; } */ auto balance = account.get_balance(); cout << "Balance is: " << balance << "\n"; auto amount{ 0 }; cout << "Enter deposit amount: \n"; cin >> amount; try { account.deposit(amount); cout << "Balance is: \n" << account.get_balance; } catch (Invalid e) { cout << e.get_error() << "\n"; } return 0; }
/** * Convenience class that handles creation of widgets. * * @author Carl Hartung ([email protected]) */ public class WidgetFactory { private static final String PICKER_APPEARANCE = "picker"; private final Activity context; private final boolean readOnlyOverride; private final boolean useExternalRecorder; private final WaitingForDataRegistry waitingForDataRegistry; private final QuestionMediaManager questionMediaManager; private final Analytics analytics; private final AudioPlayer audioPlayer; private final ActivityAvailability activityAvailability; private final RecordingRequesterFactory recordingRequesterFactory; public WidgetFactory(Activity activity, boolean readOnlyOverride, boolean useExternalRecorder, WaitingForDataRegistry waitingForDataRegistry, QuestionMediaManager questionMediaManager, Analytics analytics, AudioPlayer audioPlayer, ActivityAvailability activityAvailability, RecordingRequesterFactory recordingRequesterFactory) { this.context = activity; this.readOnlyOverride = readOnlyOverride; this.useExternalRecorder = useExternalRecorder; this.waitingForDataRegistry = waitingForDataRegistry; this.questionMediaManager = questionMediaManager; this.analytics = analytics; this.audioPlayer = audioPlayer; this.activityAvailability = activityAvailability; this.recordingRequesterFactory = recordingRequesterFactory; } public QuestionWidget createWidgetFromPrompt(FormEntryPrompt prompt) { String appearance = WidgetAppearanceUtils.getSanitizedAppearanceHint(prompt); QuestionDetails questionDetails = new QuestionDetails(prompt, Collect.getCurrentFormIdentifierHash(), readOnlyOverride); PermissionUtils permissionUtils = new PermissionUtils(R.style.Theme_Collect_Dialog_PermissionAlert); final QuestionWidget questionWidget; switch (prompt.getControlType()) { case Constants.CONTROL_INPUT: switch (prompt.getDataType()) { case Constants.DATATYPE_DATE_TIME: questionWidget = new DateTimeWidget(context, questionDetails, new DateTimeWidgetUtils()); break; case Constants.DATATYPE_DATE: questionWidget = new DateWidget(context, questionDetails, new DateTimeWidgetUtils()); break; case Constants.DATATYPE_TIME: questionWidget = new TimeWidget(context, questionDetails, new DateTimeWidgetUtils()); break; case Constants.DATATYPE_DECIMAL: if (appearance.startsWith(WidgetAppearanceUtils.EX)) { questionWidget = new ExDecimalWidget(context, questionDetails, waitingForDataRegistry); } else if (appearance.equals(WidgetAppearanceUtils.BEARING)) { questionWidget = new BearingWidget(context, questionDetails, waitingForDataRegistry, (SensorManager) context.getSystemService(Context.SENSOR_SERVICE)); } else { questionWidget = new DecimalWidget(context, questionDetails); } break; case Constants.DATATYPE_INTEGER: if (appearance.startsWith(WidgetAppearanceUtils.EX)) { questionWidget = new ExIntegerWidget(context, questionDetails, waitingForDataRegistry); } else { questionWidget = new IntegerWidget(context, questionDetails); } break; case Constants.DATATYPE_GEOPOINT: if (hasAppearance(questionDetails.getPrompt(), PLACEMENT_MAP) || hasAppearance(questionDetails.getPrompt(), MAPS)) { questionWidget = new GeoPointMapWidget(context, questionDetails, waitingForDataRegistry, new ActivityGeoDataRequester(permissionUtils)); } else { questionWidget = new GeoPointWidget(context, questionDetails, waitingForDataRegistry, new ActivityGeoDataRequester(permissionUtils)); } break; case Constants.DATATYPE_GEOSHAPE: questionWidget = new GeoShapeWidget(context, questionDetails, waitingForDataRegistry, new 
ActivityGeoDataRequester(permissionUtils)); break; case Constants.DATATYPE_GEOTRACE: questionWidget = new GeoTraceWidget(context, questionDetails, waitingForDataRegistry, MapProvider.getConfigurator(), new ActivityGeoDataRequester(permissionUtils)); break; case Constants.DATATYPE_BARCODE: questionWidget = new BarcodeWidget(context, questionDetails, waitingForDataRegistry, new CameraUtils()); break; case Constants.DATATYPE_TEXT: String query = prompt.getQuestion().getAdditionalAttribute(null, "query"); if (query != null) { questionWidget = getSelectOneWidget(appearance, questionDetails); } else if (appearance.startsWith(WidgetAppearanceUtils.PRINTER)) { questionWidget = new ExPrinterWidget(context, questionDetails, waitingForDataRegistry); } else if (appearance.startsWith(WidgetAppearanceUtils.EX)) { questionWidget = new ExStringWidget(context, questionDetails, waitingForDataRegistry); } else if (appearance.contains(WidgetAppearanceUtils.NUMBERS)) { questionWidget = new StringNumberWidget(context, questionDetails); } else if (appearance.equals(WidgetAppearanceUtils.URL)) { questionWidget = new UrlWidget(context, questionDetails, new CustomTabHelper()); analytics.logEvent(PROMPT, "Url", questionDetails.getFormAnalyticsID()); } else { questionWidget = new StringWidget(context, questionDetails); } break; default: questionWidget = new StringWidget(context, questionDetails); break; } break; case Constants.CONTROL_FILE_CAPTURE: questionWidget = new ArbitraryFileWidget(context, questionDetails, questionMediaManager, waitingForDataRegistry); break; case Constants.CONTROL_IMAGE_CHOOSE: if (appearance.equals(WidgetAppearanceUtils.SIGNATURE)) { questionWidget = new SignatureWidget(context, questionDetails, questionMediaManager, waitingForDataRegistry); } else if (appearance.contains(WidgetAppearanceUtils.ANNOTATE)) { questionWidget = new AnnotateWidget(context, questionDetails, questionMediaManager, waitingForDataRegistry); } else if (appearance.equals(WidgetAppearanceUtils.DRAW)) { questionWidget = new DrawWidget(context, questionDetails, questionMediaManager, waitingForDataRegistry); } else { questionWidget = new ImageWidget(context, questionDetails, questionMediaManager, waitingForDataRegistry); } break; case Constants.CONTROL_OSM_CAPTURE: questionWidget = new OSMWidget(context, questionDetails, waitingForDataRegistry); break; case Constants.CONTROL_AUDIO_CAPTURE: RecordingRequester recordingRequester = recordingRequesterFactory.create(prompt, useExternalRecorder); questionWidget = new AudioWidget(context, questionDetails, questionMediaManager, audioPlayer, recordingRequester, new GetContentAudioFileRequester(context, activityAvailability, waitingForDataRegistry)); break; case Constants.CONTROL_VIDEO_CAPTURE: questionWidget = new VideoWidget(context, questionDetails, questionMediaManager, waitingForDataRegistry); break; case Constants.CONTROL_SELECT_ONE: questionWidget = getSelectOneWidget(appearance, questionDetails); break; case Constants.CONTROL_SELECT_MULTI: // search() appearance/function (not part of XForms spec) added by SurveyCTO gets // considered in each widget by calls to ExternalDataUtil.getSearchXPathExpression. // This means normal appearances should be put before search(). 
if (appearance.contains(WidgetAppearanceUtils.MINIMAL)) { questionWidget = new SelectMultiMinimalWidget(context, questionDetails, waitingForDataRegistry); } else if (appearance.startsWith(WidgetAppearanceUtils.LIST_NO_LABEL)) { questionWidget = new ListMultiWidget(context, questionDetails, false); } else if (appearance.startsWith(WidgetAppearanceUtils.LIST)) { questionWidget = new ListMultiWidget(context, questionDetails, true); } else if (appearance.startsWith(WidgetAppearanceUtils.LABEL)) { questionWidget = new LabelWidget(context, questionDetails); } else if (appearance.startsWith(WidgetAppearanceUtils.IMAGE_MAP)) { questionWidget = new SelectMultiImageMapWidget(context, questionDetails); } else { questionWidget = new SelectMultiWidget(context, questionDetails); } break; case Constants.CONTROL_RANK: questionWidget = new RankingWidget(context, questionDetails); break; case Constants.CONTROL_TRIGGER: questionWidget = new TriggerWidget(context, questionDetails); break; case Constants.CONTROL_RANGE: if (appearance.startsWith(WidgetAppearanceUtils.RATING)) { questionWidget = new RatingWidget(context, questionDetails); } else { switch (prompt.getDataType()) { case Constants.DATATYPE_INTEGER: if (prompt.getQuestion().getAppearanceAttr() != null && prompt.getQuestion().getAppearanceAttr().contains(PICKER_APPEARANCE)) { questionWidget = new RangePickerIntegerWidget(context, questionDetails); } else { questionWidget = new RangeIntegerWidget(context, questionDetails); } break; case Constants.DATATYPE_DECIMAL: if (prompt.getQuestion().getAppearanceAttr() != null && prompt.getQuestion().getAppearanceAttr().contains(PICKER_APPEARANCE)) { questionWidget = new RangePickerDecimalWidget(context, questionDetails); } else { questionWidget = new RangeDecimalWidget(context, questionDetails); } break; default: questionWidget = new StringWidget(context, questionDetails); break; } } break; default: questionWidget = new StringWidget(context, questionDetails); break; } return questionWidget; } private QuestionWidget getSelectOneWidget(String appearance, QuestionDetails questionDetails) { final QuestionWidget questionWidget; boolean isQuick = appearance.contains(WidgetAppearanceUtils.QUICK); // search() appearance/function (not part of XForms spec) added by SurveyCTO gets // considered in each widget by calls to ExternalDataUtil.getSearchXPathExpression. // This means normal appearances should be put before search(). if (appearance.contains(WidgetAppearanceUtils.MINIMAL)) { questionWidget = new SelectOneMinimalWidget(context, questionDetails, isQuick, waitingForDataRegistry); } else if (appearance.contains(WidgetAppearanceUtils.LIKERT)) { questionWidget = new LikertWidget(context, questionDetails); } else if (appearance.contains(WidgetAppearanceUtils.LIST_NO_LABEL)) { questionWidget = new ListWidget(context, questionDetails, false, isQuick); } else if (appearance.contains(WidgetAppearanceUtils.LIST)) { questionWidget = new ListWidget(context, questionDetails, true, isQuick); } else if (appearance.equals(WidgetAppearanceUtils.LABEL)) { questionWidget = new LabelWidget(context, questionDetails); } else if (appearance.contains(WidgetAppearanceUtils.IMAGE_MAP)) { questionWidget = new SelectOneImageMapWidget(context, questionDetails, isQuick); } else { questionWidget = new SelectOneWidget(context, questionDetails, isQuick); } return questionWidget; } }
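The factory above routes on a control type plus an appearance hint, with more specific appearances checked before generic fallbacks. A minimal, self-contained sketch of that dispatch pattern follows; all names in it are invented for illustration and are not part of the ODK Collect codebase.

// A runnable sketch of appearance-based dispatch; every identifier here is
// hypothetical and only mirrors the shape of WidgetFactory's switch logic.
public class AppearanceDispatchSketch {

    interface Widget { String name(); }

    static Widget create(String controlType, String appearance) {
        switch (controlType) {
            case "select1":
                // specific appearances are checked before the generic fallback,
                // mirroring the ordering concern noted in the factory comments
                if (appearance.contains("minimal")) {
                    return () -> "SelectOneMinimalWidget";
                } else if (appearance.contains("likert")) {
                    return () -> "LikertWidget";
                }
                return () -> "SelectOneWidget";
            case "input":
            default:
                return () -> "StringWidget";
        }
    }

    public static void main(String[] args) {
        // "minimal" matches first, so the minimal variant wins
        System.out.println(create("select1", "minimal likert").name());
    }
}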
/**
 * Represents a change event on a file.
 */
public class FileRecord {

    public enum FileState {
        NEW, DELETED, MODIFIED
    }

    public interface Process {
        void process(FileRecord record);
    }

    protected File file;
    protected FileState state;

    public FileRecord(File file, FileState state) {
        this.file = file;
        this.state = state;
    }

    public File getFile() {
        return file;
    }

    public FileState getState() {
        return state;
    }
}
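A small, self-contained sketch of how FileRecord and its Process callback compose. FileRecordDemo and the file path are hypothetical names invented for illustration; only FileRecord itself comes from the code above.

import java.io.File;

public class FileRecordDemo {
    public static void main(String[] args) {
        // Wrap a file change in a record (the path is a made-up example)
        FileRecord record = new FileRecord(new File("/tmp/data.txt"),
                FileRecord.FileState.MODIFIED);

        // Process has a single abstract method, so a lambda works
        FileRecord.Process printer = r ->
                System.out.println(r.getFile().getName() + " -> " + r.getState());

        printer.process(record);  // prints: data.txt -> MODIFIED
    }
}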
Identification of High-Risk Patients with Non-ST Segment Elevation Myocardial Infarction using Strain Doppler Echocardiography: Correlation with Cardiac Magnetic Resonance Imaging

Assessment of left ventricular (LV) function is important for decision-making and risk stratification in patients with acute coronary syndrome. Many patients with non-ST segment elevation myocardial infarction (NSTEMI) have substantial infarction, but these patients often do not reveal clinical signs of instability, and they rarely fulfill criteria for acute revascularization therapy.

AIM
This study evaluated the potential of strain Doppler echocardiography analysis for the assessment of LV infarct size when compared with standard two-dimensional echo and cardiac magnetic resonance (CMR) data.

METHODS
Thirty patients with NSTEMI were examined using echocardiography after hospitalization for 1.8 ± 1.1 days for the assessment of left ventricular ejection fraction, wall motion score index (WMSI), and LV global longitudinal strain (GLS). Infarct size was assessed using delayed enhancement CMR 6.97 ± 3.2 days after admission as a percentage of total myocardial volume.

RESULTS
GLS was performed in 30 patients, and 82.9% of the LV segments were accepted for GLS analysis. Comparisons were performed between GLS and standard echo and between GLS and CMR in patients with a complete data set. The linear relationship demonstrated moderately strong and significant associations between GLS and ejection fraction (EF) as determined using standard echo (r = 0.452, P = 0.012), WMSI (r = 0.462, P = 0.010), and the gold standard CMR-determined EF (r = 0.57, P < 0.001). Receiver operating characteristic curves were used to analyze the ability of GLS to evaluate infarct size. GLS was the best predictor of infarct size in a multivariate linear regression analysis (β = 1.51, P = 0.027). WMSI >1.125 and a GLS cutoff value of −11.29% identified patients with substantial infarction (≥12% of total myocardial volume measured using CMR) with accuracies of 76.7% and 80%, respectively. However, GLS remained the only independent predictor in a multivariate logistic regression analysis to identify an infarct size ≥12%.

CONCLUSION
GLS is a good predictor of infarct size in NSTEMI, and it may serve as a tool in conjunction with risk stratification scores for the selection of high-risk NSTEMI patients.

Introduction
Coronary artery disease (CAD) is the most prevalent manifestation of cardiovascular diseases, and it is associated with high mortality and morbidity. 1 Registry data consistently demonstrated that the incidence of non-ST segment elevation myocardial infarction (NSTEMI) has increased, and it is more frequent than ST elevation myocardial infarction (STEMI). 2 Infarct size is a strong prognostic indicator of mortality and major adverse cardiovascular events after myocardial infarction (MI), and the transmural extent of MI is related to the probability of functional recovery after acute revascularization. Revascularization limits infarct size and salvages viable myocardium, which has dramatically improved the prognosis of patients presenting with acute coronary syndromes. Many patients with NSTEMI exhibit substantial infarction, 3,10 but these patients often do not develop ST segment elevation or reveal clinical signs of instability, and the criteria for acute reperfusion therapy are rarely fulfilled.
Techniques based on echocardiography, nuclear imaging, and magnetic resonance are used in current clinical practice to estimate infarct size and identify viable myocardial segments that may benefit from coronary revascularization. 4,11,12 However, the routine use of these advanced imaging technologies in daily clinical practice is constrained by availability, costs, and logistics. Acute coronary occlusion induces left ventricular (LV) systolic dysfunction, which is quantified using echocardiography and correlates with infarct size, and this method may be applicable during the very early development phase of MI. Strain echocardiography is an accurate and validated measure of regional LV systolic function that exhibits an excellent ability to differentiate between different levels of infarct size, in correlation with the standard assessment of final infarct size using contrast-enhanced magnetic resonance imaging (CE-MRI). Strain echocardiography was recently validated as a prognostic indicator. 19,20

Objectives
The present study evaluated the potential of strain Doppler echocardiography analysis for the assessment of LV infarct size in NSTEMI patients when compared with standard two-dimensional (2D) echo and cardiac magnetic resonance (CMR) data.

Methods
Patients. The study enrolled 30 patients who presented with recent NSTEMI. All patients were clinically and hemodynamically stable during index admission, and none of the patients were referred for urgent coronary intervention. All patients received optimal medical therapy based on current guidelines. 21

Exclusion criteria.

Clinical and laboratory evaluations. An independent physician who was blinded to all data performed baseline clinical and laboratory evaluations, including risk factors, Killip class assessment, and ECG. GRACE risk score for NSTEMI was calculated during admission to predict mortality and reinfarction in hospital and at six months. According to GRACE risk score, patients were categorized as low (≤108), intermediate (109-140), and high risk (>140). 22 Cardiac markers (eg, troponin I, creatine kinase enzyme) were measured in serial samples with at least three samples with six hours in-between.

Echocardiography. All patients underwent standard echocardiographic examinations within 48 hours from NSTEMI using standard commercially available equipment (HD11 XE PHILIPS) with a phased array transducer. Left ventricular ejection fraction (LVEF) was calculated from apical four-chamber images using the modified Simpson's method. Wall motion score was assessed in a 16-segment model, and segmental wall motion was judged on the basis of the observed wall thickening and endocardial motion of the myocardial segment as normal = 1, hypokinetic (reduced thickening) = 2, akinetic (absent or negligible thickening) = 3, and dyskinetic (systolic thinning or stretching) = 4. Wall motion score index (WMSI) was measured as the average of the scores of all segments visualized. 23

Strain analysis. Three consecutive cycles in three apical planes (four-chamber, two-chamber, and long-axis planes) were obtained using color tissue Doppler echocardiography while holding the breath at the end of expiration to minimize translational movement of the heart, and loops were digitally stored and analyzed offline using Q lab. Aliasing in the velocity mode was avoided, and longitudinal strain was measured using tissue Doppler echocardiography in a 16-segment LV model.
Peak negative strain and strain rate, which represent maximum longitudinal shortening, were measured for each segment and obtained from one of the three consecutive cardiac cycles, not as an average of the three cycles. Values of all segments were averaged to obtain global longitudinal strain (GLS). Infarct zone strain was measured as an averaged strain value from the infarcted segments that were detected using contrast-enhanced cardiac magnetic resonance (CE-CMR).

Cardiac magnetic resonance imaging. CMR was performed for all patients 6.97 ± 3.2 days from admission using 1.5 T scanners equipped with master gradients and a dedicated cardiac software package (Philips Medical Systems). Cine steady-state free precession (SSFP) sequences in different planes (two-chamber, four-chamber, and ventricular short-axis planes) were used primarily for quantitative ventricular measurements. Slice thickness was ∼8 mm with no gap in-between. Images were obtained while holding the breath at the end of expiration to minimize variations in the position of the diaphragm and the heart. Quantitative evaluation of ventricular function was achieved by obtaining a series of contiguous SSFP cine MRI slices that covered the ventricles in short-axis views. Myocardial delayed enhancement sequences were performed 10-20 minutes after the administration of 0.15 mmol/kg gadolinium-based contrast. MDE was first performed using several inversion times (TI scout) to select the one that best nulls the myocardium. MDE was performed in short- and long-axis planes using this inversion time. Total myocardial area was measured on each short-axis image by manually drawing the endocardium and epicardium in end-diastolic frames, and the area of infarcted myocardium was manually traced. Final infarct size was calculated as a percentage of infarct volume/total myocardial volume. Short- and long-term mortality rates are increased in patients with infarct size ≥12%. 3,4 Therefore, we classified the patients into two groups with infarct sizes <12% and ≥12%. Segmental transmurality was calculated in a 17-segment model of the LV myocardium (the basal and mid-ventricular short-axis slices were divided into six segments, the apical short-axis slices were divided into four segments, and infarcts at the apical cap were detected in two-chamber slices) as the infarct volume divided by myocardial volume per segment, and segments with ≥50% contrast enhancement were judged transmurally infarcted. 15

Statistical analysis. Data were analyzed using IBM SPSS software package version 20.0. Qualitative data are described using numbers and percents. Quantitative data are described using means and standard deviation, medians, and minimum and maximum. Comparisons between different groups of categorical variables were performed using the chi-square test. The distributions of quantitative variables were tested for normality using the Kolmogorov-Smirnov test, Shapiro-Wilk test, and D'Agostino test, and histograms and QQ plots were used for vision tests. Parametric tests were applied for normally distributed data. Nonparametric tests were used for abnormally distributed data. Comparisons between two independent populations for normally distributed data were performed using independent t-tests. Comparisons between two independent populations for abnormally distributed data were performed using Mann-Whitney tests, and the Kruskal-Wallis test was used to compare different groups.
Correlations between two quantitative variables were assessed using the Spearman coefficient. We used intraclass correlation coefficients in 10 randomly selected patients for reproducibility (interobserver variability and intraobserver variability), and two independent observers analyzed the strain in each segment. Agreement of different predictive factors with the outcome was assessed and expressed as sensitivity, specificity, positive predictive value, negative predictive value, and accuracy. Receiver operating characteristic (ROC) curve was plotted to analyze a recommended cutoff, and the area under the ROC curve denoted the diagnostic performance of the test. An area >50% denotes acceptable performance, and an area ∼100% is the best performance for the test. Multivariate linear regression was assessed. Univariate and multivariate logistic regressions were assessed. Significant test results are quoted as two-tailed probabilities. Significance of the obtained results was judged at the 5% level. 24,25

Ethics statement. The review board of the Faculty of Medicine, Alexandria University, reviewed and approved this study. All patients were informed about the technique, and we obtained informed consent from all participants. Our research complied with the principles of the Declaration of Helsinki.

Results
Patients' characteristics. Table 1 provides an overview of demographic and clinical characteristics of the study population.

Echocardiography and strain analysis. Echocardiographic evaluations were performed in all patients within 33.17 ± 14.03 hours of admission (range 5-48 hours). Standard echocardiography for evaluations of LVEF, LV volumes, and WMSI was performed for all patients. LVEF% was 56.82 ± 6.19% (range 42.6%-71.6%). A total of 480 segments were examined for wall motion abnormalities, and 86 of these segments (18%) were hypokinetic. The mean WMSI was 1.18 ± 0.14 (range 1.0-1.44). A total of 398 segments (82.9%) were accepted for GLS, and loops of color tissue Doppler were analyzed with a mean frame rate of 116.4 ± 16.79 Hz. GLS was −11.77% ± 2.54% (range −17.47% to −6.88%). Intraclass correlation coefficients for interobserver and intraobserver variabilities of GLS were 0.948 and 0.967, respectively.

Infarct transmurality and strain analysis. There were significant positive correlations between segmental peak negative strain and segmental transmurality (r = 0.587, P < 0.001) and strain rate and the corresponding segmental transmurality (r = 0.377, P < 0.001; Fig. 2). Segmental peak negative strain and strain rate significantly separated remote segments without infarction from segments with nontransmural or transmural infarction (P < 0.001 for all). However, these factors did not differentiate between nontransmural and transmural infarctions (P = 0.227 and P = 0.075, respectively). Segmental strain >−9.1 had a specificity of 72.42% and a sensitivity of 71.79% to detect transmural infarctions (ie, transmurality ≥50%). Linear regression model predicting the infarct size percent (without cutoff point) revealed that only GLS significantly predicted the infarct size percent (P = 0.027) after adjusting the other factors (troponin, CK-MB, 2D ejection fraction, and WMSI; Table 2).

Echocardiography with respect to infarct size. LVEF, WMSI, and GLS discriminated between patients with infarct size <12% and ≥12% with significant differences between the groups (Table 3 and Table 4). ROC analysis (Fig. 4) demonstrated that GLS exhibited a good ability to identify the patients with infarct size ≥12%, and it was superior to WMSI, EF, and cardiac biomarkers.

Angiographic findings. Twenty-seven patients underwent coronary angiography during the index hospitalization, 10 patients had single-vessel CAD, 5 patients had two-vessel CAD, and 12 patients had multivessel CAD. Sixteen of these patients underwent PCI for the culprit artery during the index procedure, six patients were referred for surgical revascularization, and five patients were advised to receive medical treatment.

Discussion
The results of the present study confirm the usefulness of noninvasive imaging to identify high-risk patients presenting with acute NSTEMI. We found a significant correlation between tissue Doppler-derived GLS and infarct size. The current guidelines recommend acute reperfusion therapy only in clinically unstable NSTEMI patients. 21 This strategy may be inadequate in the subgroup of NSTEMI patients who have substantial infarction. Our findings indicate that GLS evaluation provides an accurate assessment of the global myocardial function and the presence of segments with a transmural extent of necrosis. These results provide important clinical implications because these data may provide a means for the early identification of patients who have large and significant infarction and are at high risk. Echocardiography is the only bedside, noninvasive method to identify these patients. The difference between STEMI and NSTEMI in the emergency room by definition is electrophysiological, not pathophysiological. A subgroup of NSTEMI patients has substantial infarction, and there is a significant overlap in final infarct size between STEMI and NSTEMI patients. Even patients with a normal ECG may develop large infarctions. 10,26 Infarct size is a strong predictor of mortality and major adverse cardiovascular events. 5 The current reperfusion therapy is highly effective in reducing infarct size. The relative reduction of infarct size achieved is typically 40% by using thrombolysis and 60% by using primary PCI. 27 These procedures salvage viable myocardium and favorably influence infarct healing. 28,29 Mortality rate is markedly reduced in the era of reperfusion therapy, and this benefit is largely attributed to a reduction of infarct size. 5,6 The association of infarct size with increased mortality rate was demonstrated in previous studies, 3,4 which found that an infarct size ≥12% was associated with increased mortality. One of these studies was a large trial that found 12% to be the median infarct size in STEMI patients treated using acute reperfusion, and an IS <12% of the LV was associated with low mortality. Therefore, we used 12% as the cutoff to identify patients who were at high risk. There were 12 patients with infarct size ≥12% in our study, which accounted for ∼40% of the total number of patients. This fraction may have occurred because our patients were from a tertiary referral hospital that serves a wide area. Therefore, the patients referred to this hospital were relatively at high risk.

Diagnostic tools for the early assessment of infarct size. The time window for myocardial salvage after acute occlusion of a coronary artery is narrow. Therefore, a tool to predict infarct size should be based on information that can be obtained from the emergency department.
Patients' characteristics, risk factors, and ECG changes did not correlate with infarct size in the present study and could not identify patients with substantial infarction. Cardiac biomarkers, including CK-MB and troponin, correlated well with infarct size and identified patients with substantial infarction. The amount of enzymes depleted from the heart is proportional to the size of the infarction. However, only a small percentage of the amount depleted reaches the circulation in the absence of coronary revascularization. The remainder is hydrolyzed locally or in lymph tissue. 30 Serial samples permit a mathematical modeling of the amount of enzymes depleted, and therefore, an estimate of infarct size as long as a predictable relationship exists between the amount depleted and the amount that reaches the circulation. Previous studies found that different parameters of cardiac enzyme measurements correlated with infarct size, except for measurements at admission and early (<12 hours) after the onset of chest pain. This correlation is primarily the result of the slow release of these biomarkers.

Prediction of infarct size using echocardiography. Myocardial systolic function is dependent on a continuous blood supply and deteriorates within seconds of acute coronary occlusion, 38 even before the onset of necrosis. Therefore, one important advantage of echocardiography is that it can identify patients with developing substantial infarction at a time point when the intervention may achieve a significant myocardial salvage. LVEF using echocardiography is a well-established tool to describe LV function, and it has traditionally been used to assess the degree of myocardial damage, as a predictor of outcome and a marker of early and late complications after AMI. However, the assessment of LV damage using echocardiography shortly after revascularization provides less precise infarct size estimates as a result of stunning and the inability of resting myocardial dynamics to convincingly distinguish reversible from irreversible dysfunctional myocardium. 38-40 LVEF measured using echocardiography correlated well with LVEF measured using CMR in our study, and both correlated moderately with infarct size. However, the correlation was stronger with EF measured using CMR, which identified patients with substantial infarction, but at a low sensitivity and specificity. One possible reason is that LVEF describes the global LV function, but the infarcted area and reduced function are regional, and a decrease in LVEF supposes that several LV segments are involved. LVEF is primarily a measurement of the radial contraction of the wall plus some longitudinal contraction, which is reflected by the mitral plane movement. WMSI is another established parameter of LV systolic function that is a validated prognostic indicator after MI. 41 Previous studies consistently demonstrated that evaluations of wall motion abnormalities perform better than volume-based LV indices in early risk assessment for MI patients. 41,42 However, this technique is semiquantitative, experience dependent, and based on the subjective interpretation of myocardial motion. Our study found a strong correlation between WMSI and infarct size measured using CE-CMR (R² = 0.381, P < 0.001). We also found that WMSI at a cutoff of 1.3 identified the patients with an infarct size ≥12% with a sensitivity of 77% and specificity of 92%.

Strain Doppler echocardiography for the prediction of infarct size.
All functional indices based on myocardial deformation (including direct visual assessment, wall thickening, and strain measurements) are load-dependent. This load or local stress dependence is even more complex in the ischemic or infarcted part of the LV, where the mechanical behavior of infarcted subendocardial myocardium is largely dictated by the preserved subepicardial layers. 43,44 Similarly, the function of preserved myocardium adjacent to the infarct border is heavily dependent on the mechanical behavior of remote noninfarcted regions. 45,46 Longitudinal strain echocardiography is a relatively new parameter that is not widely used in clinical practice. This technique quantifies contraction only in the longitudinal direction. This longitudinal vector of the LV three-dimensional contraction pattern primarily represents subendocardial contraction, and it is likely more sensitive to ischemia when compared with the radial contraction caused by the deformation of the circumferential midmyocardial and subepicardial layers. 47 Therefore, global strain may be a better indicator of myocardial damage due to ischemia. Global strain reflects the averaged segmental myocardial long-axis relative shortening, and it is a global functional measurement that may provide information beyond what is available from WMSI and LVEF. 48 GLS was −11.77% ± 2.54% (range −17.47% to −6.88%) in our study. The estimated values of GLS in our study were much lower than those of previously reported studies on healthy individuals. 49,50 GLS was also strongly correlated with infarct size (r = 0.601, P < 0.001). The estimated values of averaged strain from the infarct zone (−7.93% ± 2.27%) were lower when compared with the values obtained from remote areas (−13.97% ± 2.59%). However, strain of the infarcted zone was not significantly correlated with infarct size. GLS was the only independent predictor of infarct size (P = 0.027) when entered as a covariate with other measures of LV systolic function and cardiac biomarkers in a linear regression analysis. GLS at −11.29% could identify patients with infarct size ≥12% with a sensitivity of 83.33% and specificity of 77.78% (area under the curve = 0.824). GLS remained a significant and an independent predictor to identify patients with infarct size ≥12% when entered as a covariate in a logistic regression model, along with troponin, CK-MB, EF, and WMSI.

Transmurality. The evaluation of transmural myocardial infarct extension (≥50%) is predictive of myocardial viability. 51 We found a significant correlation between infarct transmurality and segmental peak negative strain and strain rate in corresponding segments (r = 0.587 and r = 0.377, respectively, P < 0.001 for both). However, the relationship was not as strong as between global strain and total infarct size (r = 0.601). One important reason for this finding is that the size and position of each myocardial segment using MRI and echocardiography are not identical. The effect of measurement variability of the two techniques is also reduced by calculating the total infarct size and global strain. Peak negative segmental longitudinal strain also discriminated normal from necrotic myocardium (P < 0.001), but it did not differentiate between nontransmurally and transmurally infarcted segments (P = 0.227). The infarction in NSTEMI is predominantly subendocardial, and 70% of infarcted segments in our study were subendocardial.
The subendocardial layer of the myocardium contains predominantly longitudinal fibers, and the resolution is optimal because longitudinal strain tracks motions parallel to the ultrasound beam, which contrasts the motion beyond the subendocardial layer where circumferential fibers are predominantly found. This factor explains why the deterioration of longitudinal strain is not more pronounced in segments with transmural necrosis.

Conclusions
GLS is a good predictor of infarct size in NSTEMI, and it may serve as a tool in conjunction with risk stratification scores for the selection of high-risk NSTEMI patients who may benefit from urgent revascularization. These findings may be of health economic interest and possess several potential clinical implications.

Study limitations
A technical limitation is that poor echocardiographic quality in some patients interfered with tissue Doppler-derived strain. A total of 17.1% of the total number of segments were not feasible for strain measurement in our study. Infarct expansion and edema developing during the first hours may be accompanied by alterations in systolic function, and CMR examination was performed one to two days after echocardiographic evaluation. In our study protocol, we chose to select a subgroup of clinically and hemodynamically stable patients with recent NSTEMI, undergoing PCI as a second therapeutic option for recurrent angina. This is a very common typology of ischemic patients in the clinical practice of our hospital, considering the limited availability of early intervention in patients with NSTEMI in our country. The sample size was relatively small, resulting in the lack of clinical end points, and larger studies are needed to validate these results.
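For readers who want the GLS computation described in the methods in one line: the study averages the peak negative longitudinal strain over the accepted segments, which, in notation introduced here rather than in the paper, is

\mathrm{GLS} = \frac{1}{N}\sum_{i=1}^{N} \varepsilon_i

where \varepsilon_i is the peak negative longitudinal strain of segment i and N is the number of segments accepted for analysis.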
import { Inject } from '../model/injector';
import { View, injectModel, ViewG, ViewState, ViewV, serviceInjectModel } from './view';
import { ListViewV, SubView } from '../control/list';

/**
 * AncView: ancestor view. Extend this view to mark content managed by tinyts.
 * Notes on lazy loading (to use this feature, be sure to bind the AncView to a container element):
 * tinyts's asynchronous loading changes page elements, which makes for a poor user experience,
 * so the tinyts-managed part should be hidden before loading; add style="display:none" to the container element.
 * After injection completes, tinyts removes this style to reveal the container's content.
 * Note: avoid putting style attributes other than display:none on the container; they may cause unpredictable errors.
 */
export class AncView extends View {
    /**
     * AncView: ancestor view with injection support
     * @param selector selector for the ancestor view
     */
    constructor(selector?: string) {
        super();
        // bind this view
        var viewId = this.constructor.toString().match(/^function\s*([^\s(]+)/)[1];
        if (!selector) {
            selector = `#${viewId}`;
        }
        this.SetSelector(selector);
        this.SetName(viewId);
        this.LoadView();
        this.Inject();
        this.Show();
    }

    /**
     * Show: removes display:none from the style attribute
     */
    protected Show() {
        if (this.state == ViewState.LOADSUCC) {
            var style = this.target.attr("style");
            if (!style) {
                return;
            }
            var aa = /display\s*:\s*none;?/;
            style = style.replace(aa, "");
            this.target.attr("style", style);
        }
    }
}

/**
 * v decorator: marks a View bound by ID
 * @param T the target view's type (a ViewG requires the view to implement T's methods; a plain View is unrestricted)
 * @param c the View's constructor
 * @param selector the selector
 */
export function v<T>(c: { new(...args: any[]): ViewG<T> | View }, selector?: string): PropertyDecorator {
    /**
     * This function runs on the View
     * @param target the View instance
     * @param decoratedPropertyName the property name
     */
    return (target: T, decoratedPropertyName: string) => {
        const targetType: { __inject__?: Object } = target.constructor;
        // name of the target view
        var name = target.constructor.toString().match(/^function\s*([^\s(]+)/)[1];
        if (!targetType.hasOwnProperty(`__inject__`)) {
            targetType[`__inject__`] = {};
        }
        if (!targetType["__inject__"][name]) {
            targetType["__inject__"][name] = { constructor: target.constructor };
        }
        if (!targetType[`__inject__`][name]["views"]) {
            targetType[`__inject__`][name]["views"] = [];
        }
        var temp = new injectModel();
        temp.creator = c;
        temp.propertyName = decoratedPropertyName;
        temp.selector = selector == null ? `#${decoratedPropertyName}` : selector;
        targetType[`__inject__`][name]["views"].push(temp);
    };
}

export function vlist<T, U extends SubView<T>>(c: { new(...args: any[]): ListViewV<T, U> }, v: { new(...args: any[]): View }, selector?: string): PropertyDecorator {
    /**
     * This function runs on the ListViewV
     * @param target the View instance
     * @param decoratedPropertyName the property name
     */
    return (target: T, decoratedPropertyName: string) => {
        const targetType: { __inject__?: Object } = target.constructor;
        // name of the target view
        var name = target.constructor.toString().match(/^function\s*([^\s(]+)/)[1];
        if (!targetType.hasOwnProperty(`__inject__`)) {
            targetType[`__inject__`] = {};
        }
        if (!targetType["__inject__"][name]) {
            targetType["__inject__"][name] = { constructor: target.constructor };
        }
        if (!targetType[`__inject__`][name]["views"]) {
            targetType[`__inject__`][name]["views"] = [];
        }
        var temp = new injectModel();
        temp.creator = c;
        temp.propertyName = decoratedPropertyName;
        temp.selector = selector == null ? `#${decoratedPropertyName}` : selector;
        temp.params = [];
        temp.params.push(v);
        targetType[`__inject__`][name]["views"].push(temp);
    };
}

/**
 * f decorator: declares the HTML file of a virtual view
 * @param url the URL of the HTML file
 */
export function f(url: string) {
    /**
     * This function runs on ViewV's constructor
     * @param constructor ViewV's constructor
     */
    return (constructor: { new(...args: any[]): ViewV<any> }) => {
        constructor["__url__"] = url;
    };
}

/**
 * Declares a service to be injected
 * @param s the service's constructor
 */
export function s<T>(s: { new(...args: any[]): T }) {
    /**
     * This function runs on the View
     * @param target the View instance
     * @param decoratedPropertyName the property name
     */
    return (target: View, decoratedPropertyName: string) => {
        const targetType: { __inject__?: Object } = target.constructor;
        // name of the target view
        var name = target.constructor.toString().match(/^function\s*([^\s(]+)/)[1];
        if (!targetType.hasOwnProperty(`__inject__`)) {
            targetType[`__inject__`] = {};
        }
        if (!targetType["__inject__"][name]) {
            targetType["__inject__"][name] = { constructor: target.constructor };
        }
        if (!targetType[`__inject__`][name]["services"]) {
            targetType[`__inject__`][name]["services"] = [];
        }
        var temp = new serviceInjectModel();
        temp.creator = s;
        temp.propertyName = decoratedPropertyName;
        targetType[`__inject__`][name]["services"].push(temp);
    };
}
x = int(input())
found = False
# Find the first pair (a, b) with a divisible by b, a*b > x and a//b < x.
for a in range(1, x + 1):
    for b in range(1, x + 1):
        if a % b == 0 and a * b > x and a // b < x:
            print(a, b)
            found = True
            break
    if found:
        break
if not found:
    print(-1)
/** * HDFSStorage is developed to store and retrieve the data from HDFS * <p /> * The properties that can be set on HDFSStorage are: <br /> * baseDir - The base directory where the data is going to be stored <br /> * restore - This is used to restore the application from previous failure <br /> * blockSize - The maximum size of the each file to created. <br /> * * @since 0.9.3 */ public class HDFSStorage implements Storage, Configurable, Component<com.datatorrent.api.Context> { public static final int DEFAULT_BLOCK_SIZE = 64 * 1024 * 1024; public static final String BASE_DIR_KEY = "baseDir"; public static final String RESTORE_KEY = "restore"; public static final String BLOCKSIZE = "blockSize"; public static final String BLOCK_SIZE_MULTIPLE = "blockSizeMultiple"; public static final String NUMBER_RETRY = "retryCount"; private static final String OFFSET_SUFFIX = "-offsetFile"; private static final String BOOK_KEEPING_FILE_OFFSET = "-bookKeepingOffsetFile"; private static final String FLUSHED_IDENTITY_FILE = "flushedCounter"; private static final String CLEAN_OFFSET_FILE = "cleanoffsetFile"; private static final String FLUSHED_IDENTITY_FILE_TEMP = "flushedCounter.tmp"; private static final String CLEAN_OFFSET_FILE_TEMP = "cleanoffsetFile.tmp"; private static final int IDENTIFIER_SIZE = 8; private static final int DATA_LENGTH_BYTE_SIZE = 4; /** * Number of times the storage will try to get the filesystem */ private int retryCount = 3; /** * The multiple of block size */ private int blockSizeMultiple = 1; /** * Identifier for this storage. */ @NotNull private String id; /** * The baseDir where the storage facility is going to create files. */ @NotNull private String baseDir; /** * The block size to be used to create the storage files */ private long blockSize; /** * */ private boolean restore; /** * This identifies the current file number */ private long currentWrittenFile; /** * This identifies the file number that has been flushed */ private long flushedFileCounter; /** * The file that stores the fileCounter information */ // private Path fileCounterFile; /** * The file that stores the flushed fileCounter information */ private Path flushedCounterFile; private Path flushedCounterFileTemp; /** * This identifies the last cleaned file number */ private long cleanedFileCounter; /** * The file that stores the clean file counter information */ // private Path cleanFileCounterFile; /** * The file that stores the clean file offset information */ private Path cleanFileOffsetFile; private Path cleanFileOffsetFileTemp; private FileSystem fs; private FSDataOutputStream dataStream; ArrayList<DataBlock> files2Commit = new ArrayList<DataBlock>(); /** * The offset in the current opened file */ private long fileWriteOffset; private FSDataInputStream readStream; private long retrievalOffset; private long retrievalFile; private int offset; private long flushedLong; private long flushedFileWriteOffset; private long bookKeepingFileOffset; private byte[] cleanedOffset = new byte[8]; private long skipOffset; private long skipFile; private transient Path basePath; private ExecutorService storageExecutor; private byte[] currentData; private FSDataInputStream nextReadStream; private long nextFlushedLong; private long nextRetrievalFile; private byte[] nextRetrievalData; public HDFSStorage() { this.restore = true; } /** * This stores the Identifier information identified in the last store function call * * @param ctx */ @Override public void configure(Context ctx) { String tempId = ctx.getString(ID); if (tempId == null) 
{ if (id == null) { throw new IllegalArgumentException("id can't be null."); } } else { id = tempId; } String tempBaseDir = ctx.getString(BASE_DIR_KEY); if (tempBaseDir != null) { baseDir = tempBaseDir; } restore = ctx.getBoolean(RESTORE_KEY, restore); Long tempBlockSize = ctx.getLong(BLOCKSIZE); if (tempBlockSize != null) { blockSize = tempBlockSize; } blockSizeMultiple = ctx.getInteger(BLOCK_SIZE_MULTIPLE, blockSizeMultiple); retryCount = ctx.getInteger(NUMBER_RETRY,retryCount); } /** * This function reads the file at a location and return the bytes stored in the file " * * @param path - the location of the file * @return * @throws IOException */ byte[] readData(Path path) throws IOException { DataInputStream is = new DataInputStream(fs.open(path)); byte[] bytes = new byte[is.available()]; is.readFully(bytes); is.close(); return bytes; } /** * This function writes the bytes to a file specified by the path * * @param path the file location * @param data the data to be written to the file * @return * @throws IOException */ private FSDataOutputStream writeData(Path path, byte[] data) throws IOException { FSDataOutputStream fsOutputStream; if (fs.getScheme().equals("file")) { // local FS does not support hflush and does not flush native stream fsOutputStream = new FSDataOutputStream( new FileOutputStream(Path.getPathWithoutSchemeAndAuthority(path).toString()), null); } else { fsOutputStream = fs.create(path); } fsOutputStream.write(data); return fsOutputStream; } private long calculateOffset(long fileOffset, long fileCounter) { return ((fileCounter << 32) | (fileOffset & 0xffffffffL)); } @Override public byte[] store(Slice slice) { // logger.debug("store message "); int bytesToWrite = slice.length + DATA_LENGTH_BYTE_SIZE; if (currentWrittenFile < skipFile) { fileWriteOffset += bytesToWrite; if (fileWriteOffset >= bookKeepingFileOffset) { files2Commit.add(new DataBlock(null, bookKeepingFileOffset, new Path(basePath, currentWrittenFile + OFFSET_SUFFIX), currentWrittenFile)); currentWrittenFile++; if (fileWriteOffset > bookKeepingFileOffset) { fileWriteOffset = bytesToWrite; } else { fileWriteOffset = 0; } try { bookKeepingFileOffset = getFlushedFileWriteOffset( new Path(basePath, currentWrittenFile + BOOK_KEEPING_FILE_OFFSET)); } catch (IOException e) { throw new RuntimeException(e); } } return null; } if (flushedFileCounter == currentWrittenFile && dataStream == null) { currentWrittenFile++; fileWriteOffset = 0; } if (flushedFileCounter == skipFile && skipFile != -1) { skipFile++; } if (fileWriteOffset + bytesToWrite < blockSize) { try { /* write length and the actual data to the file */ if (fileWriteOffset == 0) { // writeData(flushedCounterFile, String.valueOf(currentWrittenFile).getBytes()).close(); dataStream = writeData(new Path(basePath, String.valueOf(currentWrittenFile)), Ints.toByteArray(slice.length)); dataStream.write(slice.buffer, slice.offset, slice.length); } else { dataStream.write(Ints.toByteArray(slice.length)); dataStream.write(slice.buffer, slice.offset, slice.length); } fileWriteOffset += bytesToWrite; byte[] fileOffset = null; if ((currentWrittenFile > skipFile) || (currentWrittenFile == skipFile && fileWriteOffset > skipOffset)) { skipFile = -1; fileOffset = new byte[IDENTIFIER_SIZE]; Server.writeLong(fileOffset, 0, calculateOffset(fileWriteOffset, currentWrittenFile)); } return fileOffset; } catch (IOException ex) { logger.warn("Error while storing the bytes {}", ex.getMessage()); closeFs(); throw new RuntimeException(ex); } } DataBlock db = new DataBlock(dataStream, 
fileWriteOffset, new Path(basePath, currentWrittenFile + OFFSET_SUFFIX), currentWrittenFile); db.close(); files2Commit.add(db); fileWriteOffset = 0; ++currentWrittenFile; return store(slice); } /** * @param b * @param startIndex * @return */ long byteArrayToLong(byte[] b, int startIndex) { final byte b1 = 0; return Longs.fromBytes(b1, b1, b1, b1, b[3 + startIndex], b[2 + startIndex], b[1 + startIndex], b[startIndex]); } @Override public byte[] retrieve(byte[] identifier) { skipFile = -1; skipOffset = 0; logger.debug("retrieve with address {}", Arrays.toString(identifier)); // flushing the last incomplete flushed file closeUnflushedFiles(); retrievalOffset = byteArrayToLong(identifier, 0); retrievalFile = byteArrayToLong(identifier, offset); if (retrievalFile == 0 && retrievalOffset == 0 && currentWrittenFile == 0 && fileWriteOffset == 0) { skipOffset = 0; return null; } // making sure that the deleted address is not requested again if (retrievalFile != 0 || retrievalOffset != 0) { long cleanedFile = byteArrayToLong(cleanedOffset, offset); if (retrievalFile < cleanedFile || (retrievalFile == cleanedFile && retrievalOffset < byteArrayToLong(cleanedOffset, 0))) { logger.warn("The address asked has been deleted retrievalFile={}, cleanedFile={}, retrievalOffset={}, " + "cleanedOffset={}", retrievalFile, cleanedFile, retrievalOffset, byteArrayToLong(cleanedOffset, 0)); closeFs(); throw new IllegalArgumentException(String.format("The data for address %s has already been deleted", Arrays.toString(identifier))); } } // we have just started if (retrievalFile == 0 && retrievalOffset == 0) { retrievalFile = byteArrayToLong(cleanedOffset, offset); retrievalOffset = byteArrayToLong(cleanedOffset, 0); } if ((retrievalFile > flushedFileCounter)) { skipFile = retrievalFile; skipOffset = retrievalOffset; retrievalFile = -1; return null; } if ((retrievalFile == flushedFileCounter && retrievalOffset >= flushedFileWriteOffset)) { skipFile = retrievalFile; skipOffset = retrievalOffset - flushedFileWriteOffset; retrievalFile = -1; return null; } try { if (readStream != null) { readStream.close(); readStream = null; } Path path = new Path(basePath, String.valueOf(retrievalFile)); if (!fs.exists(path)) { retrievalFile = -1; closeFs(); throw new RuntimeException(String.format("File %s does not exist", path.toString())); } byte[] flushedOffset = readData(new Path(basePath, retrievalFile + OFFSET_SUFFIX)); flushedLong = Server.readLong(flushedOffset, 0); while (retrievalOffset >= flushedLong && retrievalFile < flushedFileCounter) { retrievalOffset -= flushedLong; retrievalFile++; flushedOffset = readData(new Path(basePath, retrievalFile + OFFSET_SUFFIX)); flushedLong = Server.readLong(flushedOffset, 0); } if (retrievalOffset >= flushedLong) { logger.warn("data not flushed for the given identifier"); retrievalFile = -1; return null; } synchronized (HDFSStorage.this) { if (nextReadStream != null) { nextReadStream.close(); nextReadStream = null; } } currentData = null; path = new Path(basePath, String.valueOf(retrievalFile)); //readStream = new FSDataInputStream(fs.open(path)); currentData = readData(path); //readStream.seek(retrievalOffset); storageExecutor.submit(getNextStream()); return retrieveHelper(); } catch (IOException e) { closeFs(); throw new RuntimeException(e); } } private byte[] retrieveHelper() throws IOException { int tempRetrievalOffset = (int)retrievalOffset; int length = Ints.fromBytes(currentData[tempRetrievalOffset], currentData[tempRetrievalOffset + 1], currentData[tempRetrievalOffset + 2], 
currentData[tempRetrievalOffset + 3]); byte[] data = new byte[length + IDENTIFIER_SIZE]; System.arraycopy(currentData, tempRetrievalOffset + 4, data, IDENTIFIER_SIZE, length); retrievalOffset += length + DATA_LENGTH_BYTE_SIZE; if (retrievalOffset >= flushedLong) { Server.writeLong(data, 0, calculateOffset(0, retrievalFile + 1)); } else { Server.writeLong(data, 0, calculateOffset(retrievalOffset, retrievalFile)); } return data; } @Override public byte[] retrieveNext() { if (retrievalFile == -1) { closeFs(); throw new RuntimeException("Call retrieve first"); } if (retrievalFile > flushedFileCounter) { logger.warn("data is not flushed"); return null; } try { if (currentData == null) { synchronized (HDFSStorage.this) { if (nextRetrievalData != null && (retrievalFile == nextRetrievalFile)) { currentData = nextRetrievalData; flushedLong = nextFlushedLong; nextRetrievalData = null; } else { currentData = null; currentData = readData(new Path(basePath, String.valueOf(retrievalFile))); byte[] flushedOffset = readData(new Path(basePath, retrievalFile + OFFSET_SUFFIX)); flushedLong = Server.readLong(flushedOffset, 0); } } storageExecutor.submit(getNextStream()); } if (retrievalOffset >= flushedLong) { retrievalFile++; retrievalOffset = 0; if (retrievalFile > flushedFileCounter) { logger.warn("data is not flushed"); return null; } //readStream.close(); // readStream = new FSDataInputStream(fs.open(new Path(basePath, String.valueOf(retrievalFile)))); // byte[] flushedOffset = readData(new Path(basePath, retrievalFile + OFFSET_SUFFIX)); // flushedLong = Server.readLong(flushedOffset, 0); synchronized (HDFSStorage.this) { if (nextRetrievalData != null && (retrievalFile == nextRetrievalFile)) { currentData = nextRetrievalData; flushedLong = nextFlushedLong; nextRetrievalData = null; } else { currentData = null; currentData = readData(new Path(basePath, String.valueOf(retrievalFile))); byte[] flushedOffset = readData(new Path(basePath, retrievalFile + OFFSET_SUFFIX)); flushedLong = Server.readLong(flushedOffset, 0); } } storageExecutor.submit(getNextStream()); } //readStream.seek(retrievalOffset); return retrieveHelper(); } catch (IOException e) { throw new RuntimeException(e); } } @Override @SuppressWarnings("AssignmentToCollectionOrArrayFieldFromParameter") public void clean(byte[] identifier) { logger.info("clean {}", Arrays.toString(identifier)); long cleanFileIndex = byteArrayToLong(identifier, offset); long cleanFileOffset = byteArrayToLong(identifier, 0); if (flushedFileCounter == -1) { identifier = new byte[8]; } else if (cleanFileIndex > flushedFileCounter || (cleanFileIndex == flushedFileCounter && cleanFileOffset >= flushedFileWriteOffset)) { // This is to make sure that we clean only the data that is flushed cleanFileIndex = flushedFileCounter; cleanFileOffset = flushedFileWriteOffset; Server.writeLong(identifier, 0, calculateOffset(cleanFileOffset, cleanFileIndex)); } cleanedOffset = identifier; try { writeData(cleanFileOffsetFileTemp, identifier).close(); fs.rename(cleanFileOffsetFileTemp, cleanFileOffsetFile); if (cleanedFileCounter >= cleanFileIndex) { return; } do { Path path = new Path(basePath, String.valueOf(cleanedFileCounter)); if (fs.exists(path) && fs.isFile(path)) { fs.delete(path, false); } path = new Path(basePath, cleanedFileCounter + OFFSET_SUFFIX); if (fs.exists(path) && fs.isFile(path)) { fs.delete(path, false); } path = new Path(basePath, cleanedFileCounter + BOOK_KEEPING_FILE_OFFSET); if (fs.exists(path) && fs.isFile(path)) { fs.delete(path, false); } logger.info("deleted 
file {}", cleanedFileCounter); ++cleanedFileCounter; } while (cleanedFileCounter < cleanFileIndex); // writeData(cleanFileCounterFile, String.valueOf(cleanedFileCounter).getBytes()).close(); } catch (IOException e) { logger.warn("not able to close the streams {}", e.getMessage()); closeFs(); throw new RuntimeException(e); } } /** * This is used mainly for cleaning up of counter files created */ void cleanHelperFiles() { try { fs.delete(basePath, true); } catch (IOException e) { logger.warn(e.getMessage()); } } private void closeUnflushedFiles() { try { files2Commit.clear(); // closing the stream if (dataStream != null) { dataStream.close(); dataStream = null; // currentWrittenFile++; // fileWriteOffset = 0; } if (!fs.exists(new Path(basePath, currentWrittenFile + OFFSET_SUFFIX))) { fs.delete(new Path(basePath, String.valueOf(currentWrittenFile)), false); } if (fs.exists(new Path(basePath, flushedFileCounter + OFFSET_SUFFIX))) { // This means that flush was called flushedFileWriteOffset = getFlushedFileWriteOffset(new Path(basePath, flushedFileCounter + OFFSET_SUFFIX)); bookKeepingFileOffset = getFlushedFileWriteOffset( new Path(basePath, flushedFileCounter + BOOK_KEEPING_FILE_OFFSET)); } if (flushedFileCounter != -1) { currentWrittenFile = flushedFileCounter; fileWriteOffset = flushedFileWriteOffset; } else { currentWrittenFile = 0; fileWriteOffset = 0; } flushedLong = 0; } catch (IOException e) { closeFs(); throw new RuntimeException(e); } } @Override public void flush() { nextReadStream = null; StringBuilder builder = new StringBuilder(); Iterator<DataBlock> itr = files2Commit.iterator(); DataBlock db; try { while (itr.hasNext()) { db = itr.next(); db.updateOffsets(); builder.append(db.fileName).append(", "); } files2Commit.clear(); if (dataStream != null) { dataStream.hflush(); writeData(flushedCounterFileTemp, String.valueOf(currentWrittenFile).getBytes()).close(); fs.rename(flushedCounterFileTemp, flushedCounterFile); updateFlushedOffset(new Path(basePath, currentWrittenFile + OFFSET_SUFFIX), fileWriteOffset); flushedFileWriteOffset = fileWriteOffset; builder.append(currentWrittenFile); } logger.debug("flushed files {}", builder.toString()); } catch (IOException ex) { logger.warn("not able to close the stream {}", ex.getMessage()); closeFs(); throw new RuntimeException(ex); } flushedFileCounter = currentWrittenFile; // logger.debug("flushedFileCounter in flush {}",flushedFileCounter); } /** * This updates the flushed offset */ private void updateFlushedOffset(Path file, long bytesWritten) { byte[] lastStoredOffset = new byte[IDENTIFIER_SIZE]; Server.writeLong(lastStoredOffset, 0, bytesWritten); try { writeData(file, lastStoredOffset).close(); } catch (IOException e) { try { if (!Arrays.equals(readData(file), lastStoredOffset)) { closeFs(); throw new RuntimeException(e); } } catch (Exception e1) { closeFs(); throw new RuntimeException(e1); } } } public int getBlockSizeMultiple() { return blockSizeMultiple; } public void setBlockSizeMultiple(int blockSizeMultiple) { this.blockSizeMultiple = blockSizeMultiple; } /** * @return the baseDir */ public String getBaseDir() { return baseDir; } /** * @param baseDir the baseDir to set */ public void setBaseDir(String baseDir) { this.baseDir = baseDir; } /** * @return the id */ public String getId() { return id; } /** * @param id the id to set */ public void setId(String id) { this.id = id; } /** * @return the blockSize */ public long getBlockSize() { return blockSize; } /** * @param blockSize the blockSize to set */ public void setBlockSize(long 
blockSize) { this.blockSize = blockSize; } /** * @return the restore */ public boolean isRestore() { return restore; } /** * @param restore the restore to set */ public void setRestore(boolean restore) { this.restore = restore; } class DataBlock { FSDataOutputStream dataStream; long dataOffset; Path path2FlushedData; long fileName; private Path bookKeepingPath; DataBlock(FSDataOutputStream stream, long bytesWritten, Path path2FlushedData, long fileName) { this.dataStream = stream; this.dataOffset = bytesWritten; this.path2FlushedData = path2FlushedData; this.fileName = fileName; } public void close() { if (dataStream != null) { try { dataStream.close(); bookKeepingPath = new Path(basePath, fileName + BOOK_KEEPING_FILE_OFFSET); updateFlushedOffset(bookKeepingPath, dataOffset); } catch (IOException ex) { logger.warn("not able to close the stream {}", ex.getMessage()); closeFs(); throw new RuntimeException(ex); } } } public void updateOffsets() throws IOException { updateFlushedOffset(path2FlushedData, dataOffset); if (bookKeepingPath != null && fs.exists(bookKeepingPath)) { fs.delete(bookKeepingPath, false); } } } private static final Logger logger = LoggerFactory.getLogger(HDFSStorage.class); @Override public void setup(com.datatorrent.api.Context context) { Configuration conf = new Configuration(); if (baseDir == null) { baseDir = conf.get("hadoop.tmp.dir"); if (baseDir == null || baseDir.isEmpty()) { throw new IllegalArgumentException("baseDir cannot be null."); } } offset = 4; skipOffset = -1; skipFile = -1; int tempRetryCount = 0; while (tempRetryCount < retryCount && fs == null) { // increment before the attempt so a persistently failing newInstance() cannot loop forever tempRetryCount++; try { fs = FileSystem.newInstance(conf); } catch (Throwable throwable) { logger.warn("Not able to get file system ", throwable); } } try { Path path = new Path(baseDir); basePath = new Path(path, id); if (fs == null) { fs = FileSystem.newInstance(conf); } if (!fs.exists(path)) { closeFs(); throw new RuntimeException(String.format("baseDir passed (%s) doesn't exist.", baseDir)); } if (!fs.isDirectory(path)) { closeFs(); throw new RuntimeException(String.format("baseDir passed (%s) is not a directory.", baseDir)); } if (!restore) { fs.delete(basePath, true); } if (!fs.exists(basePath) || !fs.isDirectory(basePath)) { fs.mkdirs(basePath); } if (blockSize == 0) { blockSize = fs.getDefaultBlockSize(new Path(basePath, "tempData")); } if (blockSize == 0) { blockSize = DEFAULT_BLOCK_SIZE; } blockSize = blockSizeMultiple * blockSize; currentWrittenFile = 0; cleanedFileCounter = -1; retrievalFile = -1; // fileCounterFile = new Path(basePath, IDENTITY_FILE); flushedFileCounter = -1; // cleanFileCounterFile = new Path(basePath, CLEAN_FILE); cleanFileOffsetFile = new Path(basePath, CLEAN_OFFSET_FILE); cleanFileOffsetFileTemp = new Path(basePath, CLEAN_OFFSET_FILE_TEMP); flushedCounterFile = new Path(basePath, FLUSHED_IDENTITY_FILE); flushedCounterFileTemp = new Path(basePath, FLUSHED_IDENTITY_FILE_TEMP); if (restore) { // // if (fs.exists(fileCounterFile) && fs.isFile(fileCounterFile)) { // //currentWrittenFile = Long.valueOf(new String(readData(fileCounterFile))); // } if (fs.exists(cleanFileOffsetFile) && fs.isFile(cleanFileOffsetFile)) { cleanedOffset = readData(cleanFileOffsetFile); } if (fs.exists(flushedCounterFile) && fs.isFile(flushedCounterFile)) { String strFlushedFileCounter = new String(readData(flushedCounterFile)); if (strFlushedFileCounter.isEmpty()) { logger.warn("empty flushed file"); } else { flushedFileCounter = Long.valueOf(strFlushedFileCounter); flushedFileWriteOffset =
getFlushedFileWriteOffset(new Path(basePath, flushedFileCounter + OFFSET_SUFFIX)); bookKeepingFileOffset = getFlushedFileWriteOffset( new Path(basePath, flushedFileCounter + BOOK_KEEPING_FILE_OFFSET)); } } } fileWriteOffset = flushedFileWriteOffset; currentWrittenFile = flushedFileCounter; cleanedFileCounter = byteArrayToLong(cleanedOffset, offset) - 1; if (currentWrittenFile == -1) { ++currentWrittenFile; fileWriteOffset = 0; } } catch (IOException io) { throw new RuntimeException(io); } storageExecutor = Executors.newSingleThreadExecutor(new NameableThreadFactory("StorageHelper")); } private void closeFs() { if (fs != null) { try { fs.close(); fs = null; } catch (IOException e) { logger.debug(e.getMessage()); } } } private long getFlushedFileWriteOffset(Path filePath) throws IOException { if (flushedFileCounter != -1 && fs.exists(filePath)) { byte[] flushedFileOffsetByte = readData(filePath); if (flushedFileOffsetByte != null && flushedFileOffsetByte.length == 8) { return Server.readLong(flushedFileOffsetByte, 0); } } return 0; } @Override public void teardown() { logger.debug("called teardown"); try { if (readStream != null) { readStream.close(); } synchronized (HDFSStorage.this) { if (nextReadStream != null) { nextReadStream.close(); } } } catch (IOException e) { throw new RuntimeException(e); } finally { closeUnflushedFiles(); storageExecutor.shutdown(); } } private Runnable getNextStream() { return new Runnable() { @Override public void run() { try { synchronized (HDFSStorage.this) { nextRetrievalFile = retrievalFile + 1; if (nextRetrievalFile > flushedFileCounter) { nextRetrievalData = null; return; } Path path = new Path(basePath, String.valueOf(nextRetrievalFile)); Path offsetPath = new Path(basePath, nextRetrievalFile + OFFSET_SUFFIX); nextRetrievalData = null; nextRetrievalData = readData(path); byte[] flushedOffset = readData(offsetPath); nextFlushedLong = Server.readLong(flushedOffset, 0); } } catch (Throwable e) { logger.warn("in storage executor ", e); } } }; } }
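HDFSStorage's 8-byte identifiers come from calculateOffset, which packs the file counter into the upper 32 bits of a long and the in-file offset into the lower 32 bits; byteArrayToLong reads such values back. A minimal, self-contained sketch of that round trip (the helper names are invented for illustration and are not part of the class):

```java
// Sketch of the address encoding used by HDFSStorage: file counter in the
// upper 32 bits, offset within that file in the lower 32 bits.
public class OffsetCodecSketch {
  static long pack(long fileOffset, long fileCounter) {
    return (fileCounter << 32) | (fileOffset & 0xffffffffL);
  }

  static long fileCounter(long address) {
    return address >>> 32; // unsigned shift recovers the counter half
  }

  static long fileOffset(long address) {
    return address & 0xffffffffL; // mask recovers the offset half
  }

  public static void main(String[] args) {
    long address = pack(1024, 7); // offset 1024 within file number 7
    System.out.println(Long.toHexString(address)); // prints 700000400
    System.out.println(fileCounter(address));      // prints 7
    System.out.println(fileOffset(address));       // prints 1024
  }
}
```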
/** * Start processing the XML trail files. Invoking this method starts the parser. * @throws Exception */ public void processXml() throws Exception { try { _parser.start(); } catch (XMLStreamException e) { _log.error("Unable to parse the given xml stream", e); throw e; } }
package com.google.common.primitives; import com.google.common.annotations.GwtCompatible; import com.google.common.base.Preconditions; import java.util.Comparator; @GwtCompatible public final class SignedBytes { public static final byte MAX_POWER_OF_TWO = (byte) 64; private enum LexicographicalComparator implements Comparator<byte[]> { INSTANCE; @Override public int compare(byte[] left, byte[] right) { int minLength = Math.min(left.length, right.length); for (int i = 0; i < minLength; i++) { int result = SignedBytes.compare(left[i], right[i]); if (result != 0) { return result; } } return left.length - right.length; } } private SignedBytes() { } public static byte checkedCast(long value) { byte result = (byte) value; if (result == value) { return result; } throw new IllegalArgumentException("Out of range: " + value); } public static byte saturatedCast(long value) { if (value > Byte.MAX_VALUE) { return Byte.MAX_VALUE; } if (value < Byte.MIN_VALUE) { return Byte.MIN_VALUE; } return (byte) value; } public static int compare(byte a, byte b) { return a - b; } public static byte min(byte... array) { Preconditions.checkArgument(array.length > 0); byte min = array[0]; for (int i = 1; i < array.length; i++) { if (array[i] < min) { min = array[i]; } } return min; } public static byte max(byte... array) { Preconditions.checkArgument(array.length > 0); byte max = array[0]; for (int i = 1; i < array.length; i++) { if (array[i] > max) { max = array[i]; } } return max; } public static String join(String separator, byte... array) { Preconditions.checkNotNull(separator); if (array.length == 0) { return ""; } StringBuilder builder = new StringBuilder(array.length * 5); builder.append(array[0]); for (int i = 1; i < array.length; i++) { builder.append(separator).append(array[i]); } return builder.toString(); } public static Comparator<byte[]> lexicographicalComparator() { return LexicographicalComparator.INSTANCE; } }
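A short usage sketch of the utilities above; the values and class name are chosen only for illustration:

```java
import java.util.Arrays;

public class SignedBytesDemo {
  public static void main(String[] args) {
    // checkedCast rejects out-of-range values, saturatedCast clamps them.
    byte ok = SignedBytes.checkedCast(100L);        // 100
    byte clamped = SignedBytes.saturatedCast(300L); // 127 (Byte.MAX_VALUE)
    // SignedBytes.checkedCast(300L) would throw IllegalArgumentException.

    byte[][] rows = {{2, 1}, {1, 9}, {1, 2, 3}};
    Arrays.sort(rows, SignedBytes.lexicographicalComparator());
    // rows is now {1, 2, 3}, {1, 9}, {2, 1}: elementwise signed comparison,
    // and on a common prefix the shorter array sorts first.
    System.out.println(ok + " " + clamped + " " + SignedBytes.join(",", rows[0]));
  }
}
```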
<reponame>omartijn/ozo #pragma once #include <ozo/connection.h> #include <libpq-fe.h> namespace ozo { template <typename T> inline transaction_status get_transaction_status(T&& conn) { static_assert(Connection<T>, "T must be a Connection"); if (is_null(conn)) { return transaction_status::unknown; } const auto v = PQtransactionStatus(get_native_handle(conn)); switch (v) { case PQTRANS_UNKNOWN: return transaction_status::unknown; case PQTRANS_IDLE: return transaction_status::idle; case PQTRANS_ACTIVE: return transaction_status::active; case PQTRANS_INTRANS: return transaction_status::transaction; case PQTRANS_INERROR: return transaction_status::error; } throw std::invalid_argument("unsupported transaction state"); } } // namespace ozo
Psychotic and schizotypal symptoms in non-psychotic patients with obsessive-compulsive disorder

Background
Research is scarce with regard to the role of psychotic and schizotypal symptoms in treatment of obsessive-compulsive disorder (OCD). The aim of the current study was to investigate the occurrence and specificity of psychotic and schizotypal symptoms among non-psychotic OCD patients, to examine whether such symptoms were associated with response to exposure and response prevention (ERP), and to examine whether ERP for OCD had an impact on psychotic and schizotypal symptoms.

Methods
Non-psychotic OCD patients (n = 133) and a general non-psychotic psychiatric outpatient sample (n = 110) were assessed using self-report inventories before and after psychological treatment.

Results
Non-psychotic OCD patients did not report a greater degree of psychotic or schizotypal symptoms than the control group. Psychotic and schizotypal symptoms were not associated with OCD symptoms before or after ERP. Psychotic and schizotypal symptoms were significantly reduced following ERP.

Conclusions
Psychotic and schizotypal symptoms seem to be equally prevalent among non-psychotic OCD patients and non-psychotic psychiatric controls. These symptoms were more linked to depressive symptoms than OCD symptoms. In non-psychotic OCD patients, ERP seems sufficient to reduce OCD symptoms despite the presence of psychotic and schizotypal symptoms, and reductions in psychotic and schizotypal symptoms were observed following ERP.

Background
Obsessive-compulsive disorder (OCD) and psychotic disorders exhibit both biological and phenomenological overlap. Furthermore, there might not be a clear distinction in OCD between delusions and obsessions, as OCD patients describe varying degrees of overvalued ideas. Studies also indicate that obsessions can transform into delusions, and that OCD and symptoms of OCD can be associated with the development of psychotic disorder over time. An increased prevalence of OCD in patients with first-episode psychosis has also been found. Although this may seem to link OCD and psychotic symptoms, other studies show mixed evidence concerning this relation, leaving unanswered the question of whether psychotic and schizotypal symptoms are particularly associated with OCD, or whether they constitute symptoms that are just as prevalent in several other emotional disorders. Some studies have explored psychotic symptoms in patients with OCD. It has been argued that a symptom-based approach may be more favourable than a diagnostic approach due to the questionable validity of diagnostic systems, substantial variation in symptomatology within categorical diagnoses, and the ignoring of likely continua in symptomatology between the normal and the pathological. However, there is great variety in the reported occurrence of such symptoms, ranging from zero to fifty percent. There have been proposals of a distinct subtype of schizotypal OCD, as OCD patients with schizotypal symptoms have poorer insight and lower functioning than OCD patients without such symptoms. However, there are discrepant findings. One study found that patients with depression, panic disorder, social phobia, post-traumatic stress disorder, or alcohol/substance dependency had a higher life-time prevalence of schizotypal personality disorder than patients with OCD.
A second study found that patients with OCD reported more symptoms of schizotypal disorder than patients with depression, but less so than patients with bipolar disorder or schizophrenia. A third study found that OCD patients with schizotypal symptoms reported higher rates of comorbid major depression, posttraumatic stress disorder, substance use disorders, and greater general psychopathology. They did not find any associations between schizotypal symptoms and OCD when controlling for differences in demographics and comorbidity. To further complicate matters, a longitudinal study argued that sub-clinical levels of psychotic disorders are associated with the development of anxiety, emotional and substance-related disorders. Unlike the studies mentioned above, this study indicated that such an association was stronger between OCD and psychotic symptoms than for any of the other disorders. The proposed association between symptoms of OCD and psychotic and schizotypal symptoms may also be moderated by symptom severity, because low levels of schizotypal and OCD symptoms were distinct from each other, whereas among high-scoring individuals the two symptom categories were associated. Thus, it remains uncertain if psychotic and schizotypal symptoms are linked to OCD. The presence of psychotic and schizotypal symptoms may influence treatment outcome in OCD. Indeed, previous studies indicate that OCD patients with comorbid schizotypal disorder show poorer response to medical treatment for OCD. Related research comes from studies on poor insight in OCD. OCD with poor insight has been described as egosyntonic symptoms that might extend to delusions and psychosis. Most likely, insight falls on a continuum and is associated with symptom severity, chronicity, and poorer treatment prognosis. There is still a great lack of knowledge with regard to psychotic and schizotypal symptoms among patients with OCD and how these symptoms affect psychological treatment of OCD. In addition, we do not know whether treatment for OCD reduces psychotic and schizotypal symptoms. In the current study we assessed psychotic and schizotypal symptoms among non-psychotic patients with OCD. Furthermore, this study aimed to elucidate whether such symptoms are elevated in the OCD population, or similarly present in a general non-psychotic psychiatric outpatient sample. Finally, the current study aimed to investigate whether such symptoms were associated with reduced OCD treatment response, and whether these symptoms are reduced following exposure and response prevention treatment (ERP).

Participants and procedure
This study included two samples: non-psychotic patients diagnosed with OCD (n = 133) and a general non-psychotic psychiatric outpatient group (n = 110). Participants with psychotic disorder, suicidal behaviour, and alcohol or substance dependency were excluded. Before excluding OCD patients, there were 182 treatment seekers, but 49 of these were excluded. A total of seven patients were excluded from the OCD group due to psychotic disorders. The number of excluded patients from the control group is unknown. Due to missing data, the sample sizes were reduced to 103 in the non-psychotic OCD group and 73 in the non-psychotic control group at post-treatment. The non-psychotic OCD group comprised patients from two previously published treatment studies and an unpublished multi-center treatment study. The mean age was 34.7 years (SD: 11.6) and 71.5 % (n = 82) of the participants were female.
OCD patients were diagnosed using the Structured Clinical Interview for DSM-IV Axis I Disorders or the Anxiety Disorders Interview Schedule-Revised. Patients received either ERP-based group therapy over a period of 12 weeks or individual ERP based on the manual by Foa. A total of 65 received group treatment and 68 had individual ERP treatment. Independent t-tests found no significant difference between people in group and individual treatment with regard to changes in symptoms of paranoia (p = .19), psychoticism (p = .36), schizotypal signs (p = .17), and schizophrenia nuclear signs (p = .17). The main ingredients of ERP were, in the first session, to formulate a case conceptualization, present a habituation rationale, and use self-registration of rituals for homework. The second session involved creating the exposure hierarchy and introducing the rules for ritual prevention. The following sessions were similar in structure and consisted mainly of checking homework assignments, in vivo and imaginal exposure delivered in a sequence as specified by the hierarchy, and agreeing on homework assignments. Focus turned to relapse prevention when treatment was approaching termination. The general non-psychotic psychiatric outpatient group consisted of patients from a university clinic. They received 15 weekly sessions of eclectic psychotherapy delivered by graduate psychology students under supervision by experienced psychologists. The general outpatient group had individual treatment only. Supervisors had different backgrounds, with training in CBT or psychodynamic therapies. Supervisors were to ensure that treatment was based on good clinical research practice. Choice of treatment was made in agreement between therapists and supervisors. The average age in this sample was 34.5 years (SD: 11.4) and 74.5 % (n = 82) were female. Exclusion criteria included psychosis, violent behaviour, serious substance abuse or suicidal behaviour. Seven participants from the control group were excluded from our analyses due to being diagnosed with OCD. Participants in the non-psychotic control group were diagnosed using the Mini International Neuropsychiatric Interview (MINI), administered by graduate students under supervision and observation by experienced clinical psychologists. The psychiatric control group presented with a variety of different problems: one with pathological gambling; two with anorexia; three with somatoform disorder; three with hypochondriasis; six with adjustment disorder; one with post-traumatic stress disorder; six with mixed anxiety and depression; nine with generalized anxiety disorder; 11 with panic disorder/agoraphobia; one with specific phobia; 14 with social phobia; 36 with depressive disorders; one with bipolar disorder; one with cannabis-related disorders; and 10 people were diagnosed with a personality disorder (in some cases the graduate students did SCID-II interviews in addition to the MINI). The number of patients assessed with SCID-II is unknown. SCID-II was not used as a routine procedure, but administered when the therapist had a hypothesis that the patient suffered from a personality disorder. Twenty participants in the non-psychotic psychiatric control group were not diagnosed with a specific disorder. They were still included in the study as they were treatment seeking and received treatment at the clinic.
The non-psychotic OCD group and the non-psychotic psychiatric control group had comparable demographic characteristics. There was no significant difference between the two groups with regard to age, t(234) = .22, p = .83, or gender, F(1, 241) = .29, p = .59. The two groups were also comparable with regard to employment status. A considerable proportion of the patients were unemployed or receiving disability benefits, as 47.3 % of the psychiatric controls and 48.3 % of the OCD patients were not working or studying. The study was approved by the Regional Committee for Medical and Health Research Ethics in Norway. Written consent for participation in the study was obtained from participants.

Symptom Checklist 90 Revised (SCL-90-R)
Psychotic and schizotypal symptoms were measured with the subscales psychoticism (example item: "The idea that something is wrong with your mind") and paranoid ideation (example item: "Feeling that you are watched or talked about by others") of the SCL-90-R. The SCL-90-R consists of ninety questions that can be answered on a scale from zero (not at all) to four (extremely). It measures symptoms from the past week. Its factor structure and generalizability have been debated, and it has been suggested that the SCL-90-R is a measure of negative affect rather than nine different symptom clusters. Nevertheless, a study has shown that the two subscales psychoticism and paranoid ideation can discriminate patients with psychotic disorder or psychotic symptoms from patients with non-psychotic disorder or psychosis in remission. Furthermore, these subscales, and new subscales consisting of questions from both subscales, have been utilized as measures of both subclinical psychotic and schizotypal symptoms in several studies. Two different subscales have been suggested to measure psychotic and schizotypal symptoms using the SCL-90. The first was labelled schizotypal signs (example item: "Feeling others are to blame for most of your troubles") since it resembled criteria for schizotypal personality disorder, whereas the second was labelled schizophrenia nuclear symptoms (example item: "Hearing voices that other people do not hear") due to its similarities with criteria for schizophrenia. Later studies have validated these two subscales, and they are assumed to represent psychotic and schizotypal symptoms in a more adequate and stable manner than the two previously used subscales. They have also been used in meta-studies on psychotic experiences in the normal population. In the current study, both the two original subscales and the two scales suggested by Rössler and colleagues were used.

Yale-Brown Obsessive Compulsive Scale (Y-BOCS)
The Y-BOCS was utilized to measure symptoms of OCD. The Y-BOCS is considered the gold standard for measuring OCD severity. Obsessions and compulsions are rated on a 0-4 scale with regard to frequency, distress, interference, resistance, and control. Only the OCD group was assessed using the Y-BOCS.

Beck Depression Inventory (BDI)
The BDI is one of the most widely used instruments for identification and severity measurement of depressive symptoms. In the current study the BDI was employed as a control variable, as several authors have argued that depressive symptoms might bias self-rated psychotic symptoms. Only the OCD group was assessed with the BDI.
Psychotic and schizotypal symptoms
Independent t-tests comparing the two groups at pre- and post-treatment found no significant differences between them with regard to psychoticism, paranoid ideation, schizophrenia nuclear symptoms, or schizotypal signs. As presented in Table 1, the non-psychotic OCD group and the non-psychotic general patient group were very similar both before and after treatment concerning psychotic and schizotypal symptoms. The lowest scores were obtained on the schizophrenia nuclear symptoms (mean item score of .18 for both groups on a scale of 0-4), while mean item scores of psychoticism were approximately .50 for both groups. Scores on paranoid ideation and schizotypal signs were approximately .80 for both groups. For the schizophrenia nuclear symptoms, 34.8 % of the OCD group had a confirming score of 1 or more, compared to 34.5 % for the control group. This similarity was evident for the other subscales as well: for psychoticism, 88 % of the non-psychotic OCD group had a confirming score compared to 88.2 % of the non-psychotic controls. For paranoid ideation, 90.2 % in the non-psychotic OCD group had a confirming score compared with 85.5 % in the non-psychotic controls. And for schizotypal signs, 94 % in the non-psychotic OCD group had a confirming score compared with 90.9 % of the non-psychotic controls. There was great variation within each scale. Concerning psychotic symptoms, for items such as hearing voices that other people do not hear, or the idea that someone else can control your thoughts, only 2.3 and 6.0 % had a score higher than zero. Items such as feeling lonely even when you are with other people and the idea that something is wrong with your mind were more common; as many as 65.2 and 50 % had scores higher than zero. There was somewhat less variation within schizotypal scores. One third reported feeling that others are to blame for most of your troubles, whereas more than two thirds reported having ideas or beliefs that others do not share. A closer inspection using independent t-tests for the individual subscale items found four significant differences between the two patient groups: having thoughts about sex that bother you a lot (item 84, p = .000), the idea that you should be punished for your sins (item 85, p = .006), and having ideas or beliefs that others do not share (item 68, p = .000) were more common in the OCD group. The idea that something serious is wrong with your body (item 87, p = .004) was more common in the control group. Put differently, the observed differences between the groups appeared more related to OCD symptoms than to psychotic symptoms.

Treatment effects
We conducted repeated measures ANOVA comparing slopes of change for the non-psychotic OCD and control group. For psychoticism there was a significant reduction of symptoms, F(1,172) = 33.870, p = .000, η² = .165, and no interaction effect, F(1,172) = .006, p = .939. There was not a significant difference in treatment effect between the non-psychotic OCD group and controls for psychoticism, F(1,172) = .081, p = .611, η² = .002.

Specificity of psychotic and schizotypal symptoms to OCD
Correlation analyses were conducted to further investigate the possible specificity of psychotic and schizotypal symptoms to OCD. Partial correlations were also obtained controlling for the possible confounding effects of depression. The results of this analysis are presented in Table 2.
There were weak but significant correlations between OCD symptoms and psychotic and schizotypal symptoms; however, these correlations did not remain significant when controlling for depressive symptoms. A previous study found that schizotypal and OCD symptoms were associated among people reporting severe symptoms. In our study, the correlations were non-significant when only patients with Y-BOCS scores above 28 (N = 26) were included. In fact, the correlations were negative and ranged from -.14 (psychoticism, p = .48) to -.36 (schizophrenia nuclear symptoms, p = .07). A final aim of the study was to explore if psychotic and schizotypal symptoms affected treatment outcome for OCD. As evident from Table 2, after controlling for depressive symptoms, there was no association between psychotic or schizotypal symptoms before treatment and OCD symptoms after treatment. Thus, higher levels of these symptoms did not attenuate the effect of ERP.

Discussion
Comparison between OCD patients and the general patient sample indicated that there was no significant difference in self-reported psychotic or schizotypal symptoms. Compared with previous research using healthy controls, the non-psychotic OCD group reported significantly more psychotic and schizotypal signs (equal to an effect size of .92 for both scales). The fact that the non-psychotic OCD group and the general patient group resemble each other indicates that such symptoms constitute a general tendency for all psychiatric morbidity and not specifically for OCD. However, this study did not include psychotic patients, and it is therefore possible that OCD patients more often display psychotic comorbidity than other patient groups, but that the exclusion of such patients in the current study did not reveal such a tendency. It is, nevertheless, interesting that among non-psychotic OCD patients, psychotic and schizotypal symptoms were related to depressive symptoms, and not OCD symptoms. Scores on psychotic and schizotypal symptoms were moderately correlated with depressive symptoms. There may be considerable overlap between depressive symptoms and some of the SCL-90-R items assessing psychotic and schizotypal symptoms. This corresponds with previous research which has challenged the factor structure of the SCL-90-R. Vassend and Skrondal found psychotic and schizotypal symptoms to be significantly associated with the other symptom dimensions, and it was suggested that the SCL-90-R may measure general negative affectivity rather than specific symptom dimensions. A previous study found an association between OCD symptoms and psychotic symptoms after controlling for several variables, including depression. Their study did, however, not exclude psychotic patients and had a smaller sample of subjects diagnosed with OCD. The lack of such an association in the present study may stem from the fact that OCD symptoms and schizotypal symptoms associate only at high levels of both symptoms, and that the overall level of psychotic and schizotypal symptoms was too low. However, we did not find evidence of this when investigating the patients with the most severe OCD symptoms. In the non-psychotic OCD group, only 6 % reported thought control (the idea that someone else can control your thoughts) and 2 % reported hearing voices. Other psychotic and schizotypal symptoms were much more prevalent. Such great variation should be expected considering the heterogeneity of psychotic symptoms. In a study by Adam et al., nearly 40 % of 30 OCD patients reported hallucinations or delusions.
Differences between these findings and the present study may be due to the exclusion of patients diagnosed with comorbid psychotic disorders. Additionally, the current study assessed symptoms over the past week, whereas the Adam et al. study assessed symptoms over the past twelve months. The presence of psychotic and schizotypal symptoms was not associated with reduced response to OCD treatment. Previous studies have found that OCD patients with comorbid schizotypal disorder do not respond as well to ERP. The results from our study, however, suggest that non-psychotic OCD patients with subclinical levels of schizotypal disorder or psychotic disorder do not require additional therapeutic interventions beyond standard ERP. However, it was observed that patients with the highest load of schizotypal symptoms had significantly greater depressive symptomatology post-treatment. This may indicate that additional therapeutic intervention for depressive symptoms may be warranted for this subgroup of OCD patients. A significant reduction in both psychotic and schizotypal symptoms was observed following treatment, and the reductions were similar for the non-psychotic OCD group and the control group. This indicates that ERP also has therapeutic efficacy for psychotic and schizotypal symptoms. Such improvement may be of great importance considering the finding that psychotic symptoms in OCD may increase the risk of developing psychotic disorder. The current study has several strengths and limitations. A relatively large sample of patients was examined and, contrary to several previous studies, we controlled for depressive symptoms. Limitations include the use of the SCL-90-R as the only measure of psychotic or schizotypal symptoms. The validity of the SCL-90-R in assessing schizotypal symptoms in OCD is unclear. Even trained assessors may experience trouble with classifying particular complaints as a symptom of OCD or psychosis. Thus, it seems likely that patients might have difficulties with responding to self-report questionnaires assessing such symptoms. Future research should explore in greater depth the reliability of self-reported psychotic symptoms, although some studies have provided preliminary evidence of reliability and validity. Future research should also explore the relationship between poor insight and psychotic symptoms in OCD. Another limitation is the use of a general psychiatric outpatient sample instead of subsamples of different specific diagnostic groups. There is a possibility that psychotic and schizotypal symptoms differ across disorders. We did not find any evidence of such differences within our non-psychotic control group, but that could be due to small sample sizes. A final limitation concerns the different methods for determining diagnoses across the samples.

Conclusions
The presence of self-reported psychotic and schizotypal symptoms is similar among non-psychotic OCD patients and general non-psychotic psychiatric outpatients. Such symptoms were associated with depressive symptoms rather than OCD symptoms. Sub-diagnostic levels of such symptoms are not associated with reduced treatment response to ERP; in fact, ERP is associated with a reduction in both psychotic and schizotypal symptoms. The reductions in these symptoms were similar for ERP for OCD and for eclectic treatment for psychiatric outpatients.
package httpserver import ( "bufio" "context" "encoding/base64" "errors" "fmt" "io" "log" "net" "net/http" "strings" "time" "github.com/Asutorufa/yuhaiin/pkg/net/interfaces/proxy" iserver "github.com/Asutorufa/yuhaiin/pkg/net/interfaces/server" "github.com/Asutorufa/yuhaiin/pkg/net/proxy/server" "github.com/Asutorufa/yuhaiin/pkg/net/utils" ) func handshake(dialer proxy.StreamProxy, username, password string) func(net.Conn) { return func(conn net.Conn) { dialer := func(addr string) (net.Conn, error) { address, err := proxy.ParseAddress("tcp", addr) if err != nil { return nil, fmt.Errorf("parse address failed: %w", err) } return dialer.Conn(address) } client := &http.Client{ Transport: &http.Transport{ MaxIdleConns: 100, IdleConnTimeout: 90 * time.Second, TLSHandshakeTimeout: 10 * time.Second, ExpectContinueTimeout: 1 * time.Second, DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) { return dialer(addr) }, }, CheckRedirect: func(req *http.Request, via []*http.Request) error { return http.ErrUseLastResponse }, } err := handle(username, password, conn, dialer, client) if err != nil && !errors.Is(err, io.EOF) { log.Println("http server handle failed:", err) } } } func handle(user, key string, src net.Conn, dialer func(string) (net.Conn, error), client *http.Client) error { /* use golang http */ defer src.Close() reader := bufio.NewReader(src) _start: req, err := http.ReadRequest(reader) if err != nil { return fmt.Errorf("read request failed: %w", err) } err = verifyUserPass(user, key, src, req) if err != nil { return fmt.Errorf("http verify user pass failed: %w", err) } if req.Method == http.MethodConnect { return connect(src, dialer, req) } keepAlive := strings.TrimSpace(strings.ToLower(req.Header.Get("Proxy-Connection"))) == "keep-alive" if err = normal(src, client, req, keepAlive); err != nil { // only have resp write error return nil } if keepAlive { goto _start } return nil } func verifyUserPass(user, key string, client net.Conn, req *http.Request) error { if user == "" || key == "" { return nil } username, password, isHas := parseBasicAuth(req.Header.Get("Proxy-Authorization")) if !isHas { _, _ = client.Write([]byte("HTTP/1.1 407 Proxy Authentication Required\r\nProxy-Authenticate: Basic\r\n\r\n")) return errors.New("proxy Authentication Required") } if username != user || password != key { _, _ = client.Write([]byte("HTTP/1.1 403 Forbidden\r\n\r\n")) return errors.New("user or password verify failed") } return nil } // parseBasicAuth parses an HTTP Basic Authentication string. // "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==" returns ("Aladdin", "open sesame", true). func parseBasicAuth(auth string) (username, password string, ok bool) { const prefix = "Basic " // Case insensitive prefix match. See Issue 22736. 
if len(auth) < len(prefix) || !strings.EqualFold(auth[:len(prefix)], prefix) { return } c, err := base64.StdEncoding.DecodeString(auth[len(prefix):]) if err != nil { return } cs := string(c) s := strings.IndexByte(cs, ':') if s < 0 { return } return cs[:s], cs[s+1:], true } func connect(client net.Conn, f func(string) (net.Conn, error), req *http.Request) error { host := req.URL.Host if req.URL.Port() == "" { host = net.JoinHostPort(host, "80") } dst, err := f(host) if err != nil { // _, _ = src.Write([]byte("HTTP/1.1 500 Internal Server Error\r\n\r\n")) // _, _ = src.Write([]byte("HTTP/1.1 503 Service Unavailable\r\n\r\n")) er := resp503(client) if er != nil { err = fmt.Errorf("%v\nresp 503 failed: %w", err, er) } // _, _ = src.Write([]byte("HTTP/1.1 404 Not Found\r\n\r\n")) // _, _ = src.Write([]byte("HTTP/1.1 403 Forbidden\r\n\r\n")) //_, _ = src.Write([]byte("HTTP/1.1 408 Request Timeout\n\n")) // _, _ = src.Write([]byte("HTTP/1.1 451 Unavailable For Legal Reasons\n\n")) return fmt.Errorf("get conn [%s] from proxy failed: %w", host, err) } defer dst.Close() _, err = client.Write([]byte("HTTP/1.1 200 Connection established\r\n\r\n")) if err != nil { return fmt.Errorf("write to client failed: %w", err) } utils.Relay(dst, client) return nil } func normal(src net.Conn, client *http.Client, req *http.Request, keepAlive bool) error { modifyRequest(req) resp, err := client.Do(req) if err != nil { log.Printf("http client do failed: %v\n", err) resp = resp502(src) } else { modifyResponse(resp, keepAlive) } err = resp.Write(src) if err != nil { return fmt.Errorf("resp write failed: %w", err) } return nil } func modifyRequest(req *http.Request) { if len(req.URL.Host) > 0 { req.Host = req.URL.Host } // Prevent UA from being set to golang's default ones if req.Header.Get("User-Agent") == "" { req.Header.Set("User-Agent", "") } req.RequestURI = "" req.Header.Set("Connection", "close") removeHeader(req.Header) } func modifyResponse(resp *http.Response, keepAlive bool) { removeHeader(resp.Header) resp.Close = true if keepAlive && (resp.ContentLength >= 0) { resp.Header.Set("Proxy-Connection", "keep-alive") resp.Header.Set("Connection", "Keep-Alive") resp.Header.Set("Keep-Alive", "timeout=4") resp.Close = false } } // https://github.com/go-httpproxy func resp503(dst net.Conn) error { resp := &http.Response{ Status: "Service Unavailable", StatusCode: 503, Proto: "HTTP/1.1", ProtoMajor: 1, ProtoMinor: 1, Header: make(map[string][]string), Body: nil, ContentLength: 0, Close: true, } resp.Header.Set("Connection", "close") resp.Header.Set("Proxy-Connection", "close") return resp.Write(dst) } func resp400(dst net.Conn) error { // RFC 2068 (HTTP/1.1) requires URL to be absolute URL in HTTP proxy. response := &http.Response{ Status: "Bad Request", StatusCode: 400, Proto: "HTTP/1.1", ProtoMajor: 1, ProtoMinor: 1, Header: http.Header(make(map[string][]string)), Body: nil, ContentLength: 0, Close: true, } response.Header.Set("Proxy-Connection", "close") response.Header.Set("Connection", "close") return response.Write(dst) } func resp502(dst net.Conn) *http.Response { // RFC 2068 (HTTP/1.1) requires URL to be absolute URL in HTTP proxy. 
response := &http.Response{ Status: "Bad Gateway", StatusCode: http.StatusBadGateway, Proto: "HTTP/1.1", ProtoMajor: 1, ProtoMinor: 1, Header: http.Header(make(map[string][]string)), Body: nil, ContentLength: 0, Close: true, } response.Header.Set("Proxy-Connection", "close") response.Header.Set("Connection", "close") return response } func removeHeader(h http.Header) { connections := h.Get("Connection") h.Del("Connection") if len(connections) != 0 { for _, x := range strings.Split(connections, ",") { h.Del(strings.TrimSpace(x)) } } h.Del("Proxy-Connection") h.Del("Proxy-Authenticate") h.Del("Proxy-Authorization") h.Del("TE") h.Del("Trailers") h.Del("Transfer-Encoding") h.Del("Upgrade") } func NewServer(host, username, password string, dialer proxy.StreamProxy) (iserver.Server, error) { return server.NewTCPServer(host, handshake(dialer, username, password)) }
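For reference, the Proxy-Authorization value that parseBasicAuth expects is simply "Basic " plus the base64 encoding of "user:password" (RFC 7617). A hedged Java sketch of the same round trip follows; it mirrors the Go logic above rather than reusing it, and the class name is invented for illustration:

```java
import java.nio.charset.StandardCharsets;
import java.util.Base64;

public class BasicAuthSketch {
  static String encode(String user, String pass) {
    String token = Base64.getEncoder()
        .encodeToString((user + ":" + pass).getBytes(StandardCharsets.UTF_8));
    return "Basic " + token;
  }

  // Returns {user, pass} or null if the header is not well-formed Basic auth.
  static String[] decode(String header) {
    // Case-insensitive prefix match, like the Go code's EqualFold check.
    if (!header.regionMatches(true, 0, "Basic ", 0, 6)) {
      return null;
    }
    String creds = new String(
        Base64.getDecoder().decode(header.substring(6)), StandardCharsets.UTF_8);
    int colon = creds.indexOf(':');
    return colon < 0 ? null
        : new String[] {creds.substring(0, colon), creds.substring(colon + 1)};
  }

  public static void main(String[] args) {
    String header = encode("Aladdin", "open sesame");
    // header is "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==", as in the comment above
    String[] parts = decode(header);
    System.out.println(parts[0] + " / " + parts[1]); // Aladdin / open sesame
  }
}
```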
<reponame>andrewcchen/matasano-cryptopals-solutions {-# OPTIONS_GHC -fno-warn-tabs #-} import Common import Hex import Xor main = do putStrLn "=== Challenge3 ===" putStrLn $ vecToStr $ xorWithSingleByte enc $ breakSingleKeyXor enc where enc = hexDecode "<KEY>"
/** * An immutable map implementation for maps with comparable keys. It uses a sorted array * and binary search to return the correct values. Its only purpose is to save memory - for n * entries, it consumes 8n + 64 bytes, much less than a normal HashMap (43n + 128) or an * ImmutableMap (35n + 81). * * <p>Only a few methods are efficiently implemented: {@link #isEmpty} is O(1), {@link #get} and * {@link #containsKey} are O(log(n)), using binary search; {@link #keySet} and {@link #values} * refer to the parent instance. All other methods can take O(n) or even make a copy of the * contents. * * <p>This implementation supports neither {@code null} keys nor {@code null} values. * * @param <K> the type of keys maintained by this map; keys must be comparable * @param <V> the type of mapped values */ public final class ImmutableSortedKeyMap<K extends Comparable<K>, V> implements Map<K, V> { @SuppressWarnings({"rawtypes", "unchecked"}) private static final ImmutableSortedKeyMap EMPTY_MAP = new ImmutableSortedKeyMap(new Comparable<?>[0], new Object[0]); /** Returns the empty map. */ @SuppressWarnings("unchecked") public static <K extends Comparable<K>, V> ImmutableSortedKeyMap<K, V> of() { // Safe because the map will never hold any elements. return EMPTY_MAP; } public static <K extends Comparable<K>, V> ImmutableSortedKeyMap<K, V> of(K key0, V value0) { return ImmutableSortedKeyMap.<K, V>builder() .put(key0, value0) .build(); } public static <K extends Comparable<K>, V> ImmutableSortedKeyMap<K, V> of( K key0, V value0, K key1, V value1) { return ImmutableSortedKeyMap.<K, V>builder() .put(key0, value0) .put(key1, value1) .build(); } @SuppressWarnings("unchecked") public static <K extends Comparable<K>, V> ImmutableSortedKeyMap<K, V> copyOf(Map<K, V> data) { if (data.isEmpty()) { return EMPTY_MAP; } if (data instanceof ImmutableSortedKeyMap) { return (ImmutableSortedKeyMap<K, V>) data; } Set<K> keySet = data.keySet(); int size = keySet.size(); K[] sortedKeys = (K[]) new Comparable<?>[size]; int index = 0; for (K key : keySet) { sortedKeys[index] = Preconditions.checkNotNull(key); index++; } Arrays.sort(sortedKeys); V[] values = (V[]) new Object[size]; for (int i = 0; i < size; i++) { values[i] = data.get(sortedKeys[i]); } return new ImmutableSortedKeyMap<>(sortedKeys, values); } public static <K extends Comparable<K>, V> Builder<K, V> builder() { return new Builder<>(); } /** * A builder class for ImmutableSortedKeyMap<K, V> instances. */ public static final class Builder<K extends Comparable<K>, V> { private final Map<K, V> builderMap = new HashMap<>(); Builder() { // Not public so you must call builder() instead. } public ImmutableSortedKeyMap<K, V> build() { return ImmutableSortedKeyMap.copyOf(builderMap); } public Builder<K, V> put(K key, V value) { builderMap.put(Preconditions.checkNotNull(key), Preconditions.checkNotNull(value)); return this; } public Builder<K, V> putAll(Map<? extends K, ? extends V> map) { for (Map.Entry<? extends K, ? extends V> entry : map.entrySet()) { put(entry.getKey(), entry.getValue()); } return this; } } private class ValuesCollection extends AbstractCollection<V> { ValuesCollection() { } @Override public int size() { return ImmutableSortedKeyMap.this.size(); } @Override public boolean isEmpty() { return sortedKeys.length == 0; } @Override public boolean contains(Object o) { return ImmutableSortedKeyMap.this.containsValue(o); } @Override public Iterator<V> iterator() { if (isEmpty()) { return Collections.emptyIterator(); } return new AbstractIterator<V>() { private int currentIndex = 0; @Override protected V computeNext() { if (currentIndex >= values.length) { return endOfData(); } return values[currentIndex++]; } }; } @Override public boolean remove(Object o) { throw new UnsupportedOperationException(); } @Override public boolean removeAll(Collection<?> c) { throw new UnsupportedOperationException(); } @Override public boolean retainAll(Collection<?> c) { throw new UnsupportedOperationException(); } @Override public void clear() { throw new UnsupportedOperationException(); } } private final K[] sortedKeys; private final V[] values; private ImmutableSortedKeyMap(K[] sortedKeys, V[] values) { this.sortedKeys = sortedKeys; this.values = values; } @Override public int size() { return sortedKeys.length; } @Override public boolean isEmpty() { return sortedKeys.length == 0; } @Override public boolean containsKey(@Nullable Object key) { if (key == null) { return false; } int index = Arrays.binarySearch(sortedKeys, key); return index >= 0; } @Override public boolean containsValue(@Nullable Object value) { if (value == null) { return false; } for (V v : values) { if (v.equals(value)) { return true; } } return false; } @Override public V put(K key, V value) { throw new UnsupportedOperationException(); } @Override public V remove(Object key) { throw new UnsupportedOperationException(); } @Override public void putAll(Map<? extends K, ? extends V> map) { throw new UnsupportedOperationException(); } @Override public void clear() { throw new UnsupportedOperationException(); } @Override public V get(@Nullable Object key) { if (key == null) { return null; } int index = Arrays.binarySearch(sortedKeys, key); return index >= 0 ? values[index] : null; } @Override public Set<K> keySet() { return ImmutableSet.copyOf(sortedKeys); } @Override public Collection<V> values() { return new ValuesCollection(); } @Override public Set<Entry<K, V>> entrySet() { ImmutableSet.Builder<Entry<K, V>> builder = ImmutableSet.builder(); for (int i = 0; i < sortedKeys.length; i++) { builder.add(new SimpleImmutableEntry<K, V>(sortedKeys[i], values[i])); } return builder.build(); } @Override public String toString() { StringBuilder result = new StringBuilder(); result.append('{'); for (int i = 0; i < sortedKeys.length; i++) { if (i != 0) { result.append(", "); } result.append(sortedKeys[i]).append('=').append(values[i]); } result.append('}'); return result.toString(); } @Override public int hashCode() { int h = 0; for (Entry<K, V> entry : entrySet()) { h += entry.hashCode(); } return h; } @Override public boolean equals(@Nullable Object object) { if (this == object) { return true; } if (object instanceof Map) { // Compare by entries, per the Map contract, instead of throwing. return entrySet().equals(((Map<?, ?>) object).entrySet()); } return false; } }
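A brief usage sketch of the map above (illustrative values and class name only); lookups go through binary search over the sorted key array, and all mutators throw:

```java
public class SortedKeyMapDemo {
  public static void main(String[] args) {
    ImmutableSortedKeyMap<String, Integer> ages =
        ImmutableSortedKeyMap.<String, Integer>builder()
            .put("bob", 30)
            .put("alice", 25)
            .build();
    System.out.println(ages.get("alice")); // 25, found by binary search
    System.out.println(ages.get("carol")); // null: absent keys yield null
    System.out.println(ages);              // {alice=25, bob=30}: keys are kept sorted
    // ages.put("dave", 40); // would throw UnsupportedOperationException
  }
}
```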
<filename>flux/client.go package flux import ( "context" "crypto/tls" "fmt" "io/ioutil" "net/http" "net/url" "time" "github.com/influxdata/influxdb/v2/chronograf" ) // Shared transports for all clients to prevent leaking connections. var ( skipVerifyTransport = &http.Transport{ TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, } defaultTransport = &http.Transport{} ) // Client is how we interact with Flux. type Client struct { URL *url.URL InsecureSkipVerify bool Timeout time.Duration } // Ping checks the connection to Flux. func (c *Client) Ping(ctx context.Context) error { t := 2 * time.Second if c.Timeout > 0 { t = c.Timeout } ctx, cancel := context.WithTimeout(ctx, t) defer cancel() err := c.pingTimeout(ctx) return err } func (c *Client) pingTimeout(ctx context.Context) error { resps := make(chan (error)) go func() { resps <- c.ping(c.URL) }() select { case resp := <-resps: return resp case <-ctx.Done(): return chronograf.ErrUpstreamTimeout } } func (c *Client) ping(u *url.URL) error { u.Path = "ping" req, err := http.NewRequest("GET", u.String(), nil) if err != nil { return err } hc := &http.Client{} if c.InsecureSkipVerify { hc.Transport = skipVerifyTransport } else { hc.Transport = defaultTransport } resp, err := hc.Do(req) if err != nil { return err } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { return err } if resp.StatusCode != http.StatusNoContent { // Report the response body verbatim; do not treat it as a format string. return fmt.Errorf("%s", string(body)) } return nil }
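The Ping/pingTimeout pair above is a common bounded-health-check shape: run the request concurrently and race it against a deadline. A sketch of the same pattern in Java, with hypothetical names (not part of this package), using CompletableFuture:

```java
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class PingSketch {
  // Hypothetical stand-in for the actual HTTP round trip to /ping.
  static void doPing() { /* issue GET /ping, throw on a non-204 response */ }

  static void pingWithTimeout(long timeoutMillis) throws Exception {
    try {
      CompletableFuture.runAsync(PingSketch::doPing)
          .get(timeoutMillis, TimeUnit.MILLISECONDS); // race work vs. deadline
    } catch (TimeoutException e) {
      throw new Exception("upstream timeout"); // analogous to ErrUpstreamTimeout
    }
  }

  public static void main(String[] args) throws Exception {
    pingWithTimeout(2_000); // default 2s budget, mirroring the Go client
  }
}
```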
Highly efficient electrochemical reforming of CH4/CO2 in a solid oxide electrolyser

Electrochemical reforming of CH4/CO2 is demonstrated in a solid oxide electrolyser.

INTRODUCTION

CO2 and CH4 are important contributors to the greenhouse effect, as well as cheap and nontoxic building blocks for current single-carbon source chemistry (1). Conversion of CH4 into syngas using CO2 not only provides the key feedstock for Fischer-Tropsch synthesis but also contributes to the mitigation of greenhouse gases. The dry reforming of CH4 with CO2, an endothermic reaction (CO2 + CH4 = 2CO + 2H2), is particularly interesting from an economic point of view and has long been considered a viable method to convert CH4 into syngas. However, carbon deposition, which mainly originates from CO disproportionation and CH4 pyrolysis, causes severe deactivation of non-noble metal catalysts like nickel (2, 3). Long-term stability of nanocatalysts at high temperatures remains another major challenge because nanoparticle agglomeration leads to performance degradation (4, 5). In terms of mechanism, the dry reforming of CH4 with CO2 mainly proceeds through CH4 dehydrogenation/oxygenation and decomposition of CO2 to CO in a thermal catalysis process. Alternatively, this reaction could ideally be realized in an efficient electrochemical process by combining CO2 electrolysis (CO2 + 2e− = CO + O2−) with the electrochemical oxidation of CH4 (CH4 + O2− = CO + 2H2 + 2e−) in a simultaneous process. Solid oxide electrolysers (SOEs), the key technology for CO2 electrolysis, have been attracting great interest because of their high efficiencies in producing low-carbon fuels from renewable electrical energy (6, 7). The advantages of long life, flexible scale, and low cost give them huge potential for practical application. They can exploit available high-temperature exhaust heat to maximize electrical energy efficiency, and their operating temperature of around 800° to 1000°C fits well with the requirements of the dry reforming of CH4 with CO2 to generate syngas. In an oxide ion-conducting SOE, CO2 gas can be directly electrolyzed into CO and O2− (CO2 + 2e− = CO + O2−) at the cathode, while the generated O2− ions travel through the electrolyte to the anode to form O2 gas (O2− − 2e− = 1/2O2) under an externally applied potential. This unique mechanism offers the possibility of directly using O2− to electrochemically oxidize CH4 in situ (CH4 + O2− = CO + 2H2 + 2e−) into syngas at the anode. In this case, the electrochemical reforming of CH4/CO2, with CO2 reduction at the cathode and CH4 oxidation at the anode, may be achieved in an efficient electrochemical process, which would have huge economic and sustainability potential when exploiting available exhaust heat streams and renewable electrical energy. Electrochemical reforming of CH4/CO2 in an SOE involves the electrolysis of CO2 at the cathode and the electrochemical oxidation of CH4 at the anode in a simultaneous process. CO disproportionation and CH4 pyrolysis are then mainly distributed over the cathode and anode, respectively, which may accordingly relieve the severe carbon deposition that is commonly observed in the dry reforming of CH4 with CO2 on a metal catalyst. In addition, the electrochemical pumping of oxygen species to the anode would facilitate the removal and oxidation of deposited carbon on the porous electrode scaffold (8).
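For reference, the half-cell and overall reactions described above can be collected as display equations:

\begin{align*}
\text{cathode (CO$_2$ electrolysis):} \quad & \mathrm{CO_2 + 2e^- = CO + O^{2-}} \\
\text{anode (CH$_4$ oxidation):} \quad & \mathrm{CH_4 + O^{2-} = CO + 2H_2 + 2e^-} \\
\text{overall (dry reforming):} \quad & \mathrm{CO_2 + CH_4 = 2CO + 2H_2}
\end{align*}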
Here, we use redox-stable perovskite-type La0.75Sr0.25Cr0.5Mn0.5O3−δ (LSCM) both as cathode and anode to assemble a symmetric cell for electrochemical reforming of CH4/CO2. Ceramic LSCM is a mixed conductor with very high oxygen storage capacity and has already demonstrated excellent coking resistance for CH4 reforming and oxidation (9, 10). In addition, LSCM has been confirmed to be an excellent coking-resistant cathode for CO2 electrolysis, with long-term stability observed during high-temperature operation (11, 12). However, the performance of these ceramic electrodes is still limited by insufficient electrocatalytic activity, although it can be substantially enhanced by loading a metal nanocatalyst to create sufficient active interfaces. The incorporation of catalytic Ni nanoparticles has already proved to be an effective approach to enhance catalytic activity for the CO2 dry reforming of CH4. However, long-term instability of the nanocatalysts remains a major challenge because nanocatalyst agglomeration degrades catalytic performance at high temperatures (13). An alternative method is to dope the metal into the host lattice of LSCM when preparing the catalyst in air; the metal is then exsolved in the form of metallic nanoparticles at the surface of the LSCM scaffold under reducing conditions. In this way, the metal nanoparticles can be grown in situ and anchored on the LSCM surface if the conditions are carefully chosen to avoid full decomposition. In this case, any agglomeration of exsolved metal nanoparticles on the substrate can be remedied by periodically cycling from oxidizing to reducing conditions. The in situ growth of metal nanoparticles directly from a perovskite backbone support exhibits particularly enhanced high-temperature stability and coking resistance for CO2 electrolysis because of the stronger metal/oxide interactions that result from interface architectures anchored at the nanoscale (14). An additional way of improving catalytic performance with coking resistance is the formation of alloy nanoparticles between Ni and other metals. Such an alloy catalyst, with intimate interaction between the different metals, delivers excellent catalytic activity and coking resistance for the dry reforming of CH4 with CO2 (15). It is therefore proposed that highly efficient electrochemical reforming of CH4/CO2 with remarkable coking resistance and long-term stability can be anticipated when using a perovskite LSCM electrode scaffold with in situ exsolution of alloy nanocatalysts in an SOE. The Ni1−xCux alloys in the interface architectures are expected to deliver strong interface interactions that highly favor CO2 electrolysis at the cathode and CH4 oxidation at the anode. Copper has a lower selectivity toward H2 evolution, but it has excellent CO2/CO adsorption properties and coking resistance (16, 17). In contrast, Ni has high catalytic activity but is prone to coke formation (18). We therefore take a compromise approach, forming Ni1−xCux alloys in situ in the interface architectures and exploiting the respective catalytic characteristics of both nickel and copper. Here, we develop, for the first time, an efficient electrochemical reforming of CH4/CO2 to produce syngas in an SOE; a schematic of the process is shown in Fig. 1.
We use redox-stable perovskite LSCM both as cathode and anode, and the Ni1−xCux alloy nanocatalysts are grown in situ on the LSCM scaffold to form anchored and confined exsolved interface architectures. The strong metal/oxide interface interactions are expected to deliver enhanced high-temperature stability and coking resistance. The chemical activation of CO2 and the resistance to carbon coking are investigated on these exsolved interfaces, as well as their effectiveness for the efficient electrochemical reforming of CH4/CO2 to produce syngas. Figure 2A shows the x-ray diffraction (XRD) patterns of Ni/Cu co-doped LSCM, denoted (La0.75Sr0.25)0.9(Cr0.5Mn0.5)0.9(Ni1−xCux)0.1O3−δ (LSCM-Ni1−xCux), in the oxidized state. Doping the metal into the host lattice allows Ni/Cu to transform into a metallic alloy nanocatalyst on the LSCM scaffold after reduction, and the A-site deficiency in the perovskite LSCM facilitates the exsolution of metal nanoparticles. As expected, Ni and Cu are exclusively present as Ni2+ and Cu2+, as shown in the x-ray photoelectron spectroscopy (XPS) spectra in fig. S1, indicating successful doping into the host lattice. Figure 2B shows the XRD of LSCM-Ni1−xCux in the reduced state, which reveals the presence of a Ni1−xCux alloy phase upon reduction. XPS in fig. S1 confirms that only metallic Ni/Cu is observed upon reduction, which further validates the transformation of Ni2+ and Cu2+ into a metallic Ni1−xCux alloy. The oxygen storage capacity of LSCM is around 0.09 mol in the redox cycle, whereas it is up to 0.18 mol for LSCM-Ni1−xCux, as shown in fig. S2A, implying that up to 90% of the Ni/Cu dopants have been exsolved on the LSCM scaffold upon reduction. Figure 2C shows a scanning electron microscopy (SEM) image of LSCM particles after reduction, which displays uniform Ni0.5Cu0.5 nanoparticles anchored on the LSCM scaffold surface. The metallic alloy nanoparticles show a narrow size distribution, with an average of 40 nm, which should deliver highly active exsolved interfaces at the nanoscale. Figure 2D shows that the grown Ni0.5Cu0.5 nanoparticles are deeply anchored in the LSCM substrate according to HRTEM (high-resolution transmission electron microscopy) observations, demonstrating a clear heterojunction between the two phases that is anticipated to produce a strong interaction at the exsolved interfaces. In addition, the anchoring effect should provide enhanced thermal stability against severe long-term agglomeration, while nanoparticle regeneration is also possible through periodic redox cycling. The exsolution of confined interfaces is a generic approach that is expected to extend to many more scaffolds through synergistic control of doping and nonstoichiometry. In other substrates such as ceria, nickel can be doped into the lattice to form a Ce1−xNixO2−δ solid solution, and the nickel nanoparticles can then be grown in situ on the ceria surface after reduction at high temperatures.

RESULTS AND DISCUSSION

The growth of metal nanoparticles on the LSCM scaffold further improves the mixed conductivity in a reducing atmosphere (fig. S2B). The exsolved interfaces are expected to have enhanced catalytic activity, and the oxygen nonstoichiometry of LSCM may facilitate chemical CO2 adsorption/activation at high temperatures.
Figure 3A shows the chemical adsorption of CO2 at 800°C in an in situ Fourier transform infrared (FTIR) test, which demonstrates the effective chemical adsorption/activation of CO2, as confirmed by the transition state (TS) between the CO2 molecule and the carbonate ion (19, 20). With the exsolved interfaces, significant enhancement of the chemical adsorption/activation of CO2 would be anticipated on the tailored LSCM scaffold. Theoretical calculations were performed to provide mechanistic insight into CO2 adsorption onto M/LCO (001) surfaces, where M represents Cu metal, Ni metal, or the Ni-Cu alloy, and LCO denotes a simplified LaCrO3 substrate. The most stable adsorption configurations are shown in fig. S3 (A to C), and other configurations are shown in fig. S4. Most of these adsorption configurations involve CO2 forming a bidentate species. Chemisorption on (Ni-Cu)/LCO has an adsorption energy of −2.45 eV, which is significantly stronger (more negative) than that for the Cu (−0.94 eV) and Ni (−1.96 eV) clusters, indicating a favorable alloy effect for CO2 adsorption. An overview of adsorption energies, bond distances, and angles is provided in table S1. In all (Ni-Cu)/LCO systems, an O atom of CO2 binds with a surface Cr atom, whereas the C atom binds with a Ni atom. The Ni-C distances are in the range of 1.88 to 2.04 Å. The elongated C-O bonds and bent O-C-O angles in CO2 additionally indicate significant activation upon adsorption. We also considered the possible effect of an oxygen vacancy on CO2 activation at the interfaces. With one oxygen vacancy at the interface (fig. S5, A to C), the adsorption energy on (Ni-Cu)/LCO is significantly enhanced to −2.72 eV, as shown in Fig. 3B. The synergistic effect of the Ni-Cu alloy and the oxygen vacancy thus facilitates the adsorption of CO2 at the interfaces. The contour plots in fig. S3H show that the charge density changes mainly emerge in the 2p orbitals of the C and O1 atoms of CO2, the Cu atom, and the relevant surface Cr atoms. The C atom of CO2 and the O2 atom gain electrons, O1 also receives some electron density donated by all the surrounding atoms, and the Cr atoms of the LCO surface and the Cu atom of the Ni-Cu cluster lose electrons, which means that charge transfers from the interface to CO2. In summary, the Ni-Cu cluster and the oxygen-vacancy surface defect in the (Ni-Cu)/LCO interface system compose an active interface structure, which greatly promotes the chemical adsorption of CO2. As mentioned above, the bending of CO2 upon chemisorption indicates that electrons transfer from the interface to the CO2 moiety. As shown in table S1, most of the charge is transferred toward the carbon atom. The charge transfer in the alloy is more favorable, which indicates that the alloy has a stronger interaction with CO2; that is, more electrons are transferred from the interface to CO2, so the binding of CO2 is stronger. To validate the density functional theory (DFT) calculations for mimicking surface adsorption behavior at high temperatures, we further conducted molecular dynamics simulations at 300 to 1100 K to understand the chemisorption of CO2 at the interfaces at high temperatures. As shown in fig. S5 (D to L), the chemisorption of CO2 on the (Ni-Cu)/LCO (001) surface remains stable even at 300 to 1100 K, which further indicates that the chemisorption of CO2 is very strong at the exsolved interface architectures.
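The adsorption energies quoted above follow the convention spelled out in Materials and Methods, so a more negative value means stronger binding:

\[ E_{\mathrm{ads}} = E_{\mathrm{total}} - E_{\mathrm{CO_2}} - E_{\mathrm{substrate}} \]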
The chemical adsorption/activation of CO2 at the interfaces is expected to significantly enhance electrode activity. Long-term stability of nanocatalysts at high temperatures remains a major challenge because nanoparticle agglomeration leads to performance degradation. Here, the nickel-copper catalyst is exsolved and anchored on the LSCM scaffold, as shown in Fig. 2C, which should deliver enhanced high-temperature stability against nanoparticle agglomeration. For the reforming of CH4/CO2 at high temperatures, carbon deposition causes severe deactivation of nickel catalysts, mainly originating from CO disproportionation and CH4 pyrolysis. As shown in Fig. 3C, the LSCM scaffold with in situ-grown and anchored Ni0.5Cu0.5 alloy nanoparticles demonstrates remarkable coking resistance in the H2/CH4 mixture at 800°C for 3 hours. Only a small amount of carbon is present at the exsolved interfaces. In contrast, serious carbon deposition is inevitable if metal nanoparticles are simply loaded on the scaffold, as shown in the inset image in Fig. 3C. One reason may be the strong interactions at the exsolved interfaces at the nanoscale, which enhance the coking resistance of the nickel nanocatalyst, probably through the charge transferred from the metal particles under strong interface interactions. Another reason would be the alloy effect, whose strong interactions can also enhance coking resistance (15). In a real reforming process, we normally consider the feasibility of carbon removal to evaluate the coking-resistance performance. Here, we constructed a Ni/LCO (001) model with oxygen vacancies at the interface to investigate the mechanism of carbon removal in the presence of H2 or H2O. According to previous studies (21, 22), H2O prefers to dissociate at the metal/oxide interface rather than on the metal or oxide surfaces. It is found that H2O is strongly adsorbed on Cr atoms at the interface, releasing an energy of 1.94 eV and leading to an essentially barrierless O-H bond cleavage. The dissociated OH then reacts with an adsorbed C on the Ni/LCO (001) interface via TS1 (reaction barrier, 0.46 eV) to form an intermediate COH, which subsequently dissociates to CO and H via TS2 (reaction barrier, 0.96 eV), as shown in Fig. 3D. Because the reaction is carried out at a constant high temperature of 800°C, the possibility of carbon removal induced by H2 was also considered. The results show that H2 molecules overcome a dissociation barrier of 1.05 eV, after which the dissociated H atom adsorbs on an O atom near the interface. The adsorbed H atom then abstracts this O atom from the LCO lattice to form OH. The OH reacts with C via TS3 (reaction barrier, 2.16 eV) to form COH, which subsequently dissociates to CO, as in the case of carbon removal induced by H2O. Therefore, the system has a strong resistance to carbon deposition at high temperatures because of the synergistic effect of these two processes. In addition, carbon removal at the interfaces is highly favorable with electrochemically supplied oxygen species in the anode, because oxygen ions are simultaneously transported from the cathode under the externally applied voltage; these electrochemically oxidize the deposited carbon into carbon monoxide gas and further substantially enhance coking resistance. Single solid oxide cells with porous LSCM electrodes decorated with the nanoscale metal catalysts were constructed, and the cell microstructures are shown in fig. S6.
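The two carbon-removal channels just described can be summarized compactly (barriers as given in the text; an asterisk denotes an adsorbed species):

\begin{align*}
\mathrm{H_2O\ route:}\quad & \mathrm{H_2O^* \rightarrow OH^* + H^*}\ (\text{nearly barrierless}); \quad \mathrm{C^* + OH^*} \xrightarrow{\ TS1,\ 0.46\ \mathrm{eV}\ } \mathrm{COH^*} \xrightarrow{\ TS2,\ 0.96\ \mathrm{eV}\ } \mathrm{CO + H^*} \\
\mathrm{H_2\ route:}\quad & \mathrm{H_2 \rightarrow 2H^*}\ (1.05\ \mathrm{eV}); \quad \mathrm{H^* + O_{lattice} \rightarrow OH^*}; \quad \mathrm{C^* + OH^*} \xrightarrow{\ TS3,\ 2.16\ \mathrm{eV}\ } \mathrm{COH^*} \rightarrow \mathrm{CO}
\end{align*}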
To evaluate the electrode performance, the electrolysis of CO2 was initially performed with the tailored LSCM cathode and a (La0.8Sr0.2)0.95MnO3−δ (LSM) anode at 800°C. Figure 4 (A and B) shows that the current-voltage (I-V) relationship reveals superior performance, with up to 300% enhancement for the LSCM cathodes with exsolved interfaces in comparison to the bare LSCM cathode. The growth of metal nanoparticles drastically improves the current density, to 1.2 A cm−2 at 1.5 V, when the optimum alloy composition is used. These values are comparable to the performances of nickel-decorated (La,Sr)(Ti,Mn)O3+δ cathodes and La0.43Ca0.37Ni0.06Ti0.94O3+δ prepared with an electrochemical switching method (14, 23). The in situ impedance spectra in fig. S7 show that the electrode polarization resistance is as low as 0.3 ohm·cm2 for the Ni0.5Cu0.5-decorated LSCM, indicating a remarkable enhancement of electrode activity (24). Comparing all of the compositions, the polarization resistance of the electrodes gradually decreases with increasing nickel content in the Ni1−xCux alloy nanoparticles, which is attributed to the catalytic activity of nickel metal being greater than that of copper. The catalytic activity of the electrode with exsolved interfaces can be continuously tuned, given the strong dependence of the electrode activity on the Ni1−xCux alloy composition under reducing atmospheres at high temperatures, and the optimum synergetic effect is observed for the Ni0.5Cu0.5 composition. Figure 4 (C and D) shows the CO generation rate and current efficiency, respectively, which again confirms that the synergy of interface catalysis and the alloy effect delivers the best performance, with current efficiency up to 100%. Figure 5 (A and B) shows the I-V curves of the SOEs with CO2 electrolysis at the Ni0.5Cu0.5-LSCM cathode and CH4 oxidation at different anodes. The electrochemical oxidation of CH4 shows a strong dependence on the Ni1−xCux composition in the interface architectures: the current density increases rapidly with increasing nickel content up to 50% because of the high catalytic activity of nickel. Thus, a high nickel content improves the interface catalysis; the composition Ni0.5Cu0.5 has proven to be the most effective for enhancing electrochemical performance. The current density for CH4 oxidation is significantly enhanced, by ~100%, relative to the performance for CO2 electrolysis alone. The voltage is accordingly reduced by 0.1 to 0.2 V at identical current densities when the electrochemical oxidation of CH4 is performed at the anode in conjunction with CO2 electrolysis at the cathode. This further indicates that the electrochemical reforming of CH4/CO2 is more efficient than CO2 electrolysis alone, because the CH4 atmosphere in the anode remarkably reduces the oxygen partial pressure, which accordingly facilitates the whole electrochemical process. In situ impedance tests, shown in Fig. 5C and fig. S8, further confirm that the electrode polarization resistance is reduced by 30% relative to CO2 electrolysis under identical operating conditions. Figure 5D shows the generation of H2/CO in the anode in relation to different Ni1−xCux compositions and applied voltages.
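As a side note on the efficiency metric of Fig. 4D: the current efficiency can be read as a Faraday efficiency. A sketch of the relation, assuming the two-electron reduction CO2 + 2e− = CO (the paper's exact definition may differ), with r_CO the measured CO generation rate in mol s−1 and I the cell current:

\[ \eta_{\mathrm{CO}} = \frac{2F\, r_{\mathrm{CO}}}{I} \times 100\%, \qquad F = 96485\ \mathrm{C\ mol^{-1}} \]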
The H2/CO production demonstrates a strong dependence on the Ni1−xCux composition in the anode, with the best performance observed for the Ni0.5Cu0.5-LSCM anode, indicating that the optimum alloy composition delivers the greatest enhancement of activity for the electrochemical oxidation of CH4 in the anode. In addition, the H2/CO ratio depends on the applied voltage, with an optimum ratio of 2 below 1.1 V; a ratio below 2 is observed at higher voltages, indicating a tendency toward selective oxidation of CO in the H2/CO mixture at high voltages, which decreases the CO concentration relative to H2. In this simultaneous process, the conversion ratio of CO2/CH4 depends on both the gas flow rates and the passing current densities, and conversion ratios of up to 90% can be obtained without obvious carbon deposition from CO disproportionation and CH4 pyrolysis. Here, syngas is the main product, but ~500 parts per million of ethylene is also detected, which confirms the possibility of direct generation of ethylene. If in situ catalysis were combined with the electrochemical process on a porous electrode in a single step, the CO2/CH4 could be further converted directly into chemicals such as ethylene, for example by using exsolved interfaces with iron nanoparticles better suited to the in situ catalysis process. Figure S9 shows the long-term performance of the electrochemical reforming of CH4/CO2 with the Ni0.5Cu0.5-LSCM electrode in a symmetric cell, which indicates remarkable stability even after 300 hours of high-temperature operation. The all-perovskite symmetric cell also demonstrates excellent redox stability after 10 redox cycles within the time frame of these experiments. The exsolution of metal/oxide interfaces anchored to the perovskite lattice at the nanoscale is expected to improve both long-term and redox stability by preventing the particle coalescence driven by surface energy reduction.

CONCLUSION

In conclusion, we have demonstrated a highly efficient electrochemical reforming of CH4/CO2 in an all-perovskite SOE with exceptionally high performance and stability. The exsolved metal/oxide interfaces at the nanoscale show strong interactions that deliver enhanced coking resistance and stability. These confined metal/oxide interface architectures enable both good carbon-removal performance and high-temperature chemical CO2 adsorption/activation. In addition, our work exhibits high-temperature stability for 300 hours with significant redox cycling ability, providing the tools to develop an alternative route for CO2/CH4 conversion for energy conversion and storage.

MATERIALS AND METHODS

Material synthesis
The LSCM-based powders were prepared following previous reports (25, 26). The collected powders were sintered at 1200°C for 5 hours with a heating rate of 3°C min−1 in air. The LSM and Ce0.8Sm0.2O2−δ (SDC) powders were prepared using a combustion method, with corresponding heat treatments at 1100°C for 3 hours and at 800°C for 3 hours, respectively (9, 27). The pure LSCM, LSCM-Ni, LSCM-Ni0.75Cu0.25, LSCM-Ni0.5Cu0.5, LSCM-Ni0.25Cu0.75, and LSCM-Cu powders were treated in 5% H2/Ar at 800°C for 20 hours to prepare the reduced samples. La0.9Sr0.1Ga0.8Mg0.2O3 (LSGM) was prepared using a solid-state reaction method, pressed into pellets, and sintered at 1500°C for 6 hours in air (28). The LSGM disks, 0.6 mm thick and 20 mm in diameter, were polished and used as electrolyte supports.
Characterization
The crystal structures of the samples were characterized by XRD at a scan rate of 5° min−1 over the 2θ range of 20° to 80°. XPS with an Al Kα (1486.6 eV) radiation source was carried out to analyze the elemental states of the samples. A dc four-terminal method was used to measure the conductivity of the samples in 5% H2/Ar over the temperature range of 600° to 800°C. Thermogravimetric analysis was used to determine the oxygen stoichiometry of the samples. SEM was used to investigate the sample microstructure. TEM analysis with selected-area diffraction was performed to observe the interface architecture on a JEOL 2100F field-emission transmission electron microscope operated at 200 kV.

Electrochemical characterization
The cells with LSGM electrolyte were fabricated using the different electrodes (0.5 cm2) mixed with SDC at a ratio of 35 weight % (wt %), to which a proper amount of α-terpineol with cellulose was added. The electrode slurries were screen-printed on both surfaces of the LSGM disk, followed by heat treatment at 1100°C for 3 hours to assemble a single cell. Gold mesh (82 mesh) was used as the current collector, with gold wire 0.06 mm in diameter. The electrodes were prereduced in 5% H2/Ar for activation at 800°C. For the electrochemical tests, an electrochemical workstation (IM6, Zahner) was used in a two-electrode mode. The compositions of the outlet gas products were analyzed using online gas chromatography (GC2014, Shimadzu). In situ impedance data were recorded with a voltage amplitude of 10 mV over an applied frequency range of 0.1 Hz to 4 MHz. Long-term stability tests were performed at a fixed voltage while the current densities were recorded online using the electrochemical workstation.

Theoretical calculations
DFT calculations were performed using the plane-wave basis set Vienna Ab initio Simulation Package (VASP) code (29). Within the projector augmented wave framework, the plane-wave cutoff used for the total energy calculations was set to 450 eV. The generalized gradient approximation with the Perdew-Burke-Ernzerhof functional was used to describe exchange and correlation (30). The energies and residual forces were converged to 10−6 eV and 0.02 eV Å−1, respectively. The optimized crystal structure of LCO (LaCrO3), computed on a 3 × 3 × 6 k-point grid, was cubic, with lattice parameter a = 3.891 Å, in good agreement with the experimental values (31). A p(2 × 2) superstructure with four layers (160 atoms) of the LCO (001) surface was used as the periodic slab model. The two bottom layers were fixed at the bulk geometry during optimization, and the other atoms were fully relaxed. The vacuum region was 15 Å thick. Cu segregation on the LCO (001) surface slab was mimicked by a system containing a 13-atom Cu cluster lying on the (001) surface of LCO; the Ni clusters were constructed similarly, whereas the Ni-Cu clusters contained seven Ni atoms and six Cu atoms. A 2 × 2 × 1 k-point grid was used for Brillouin-zone sampling of the M (Cu, Ni, or Ni-Cu)/LCO (001) surface system. The adsorption energy of CO2 was calculated as E_ads = E_total − E_CO2 − E_substrate, where E_total is the total energy of the adsorption system, and E_CO2 and E_substrate are the energies of gas-phase CO2 and of the M/LCO surface system without the adsorbate, respectively (32). TS searches were carried out with the climbing-image nudged elastic band method (33, 34).
The stability of the high-temperature structure was verified by molecular dynamics simulation.

SUPPLEMENTARY MATERIALS
Supplementary material for this article is available at http://advances.sciencemag.org/cgi/content/full/4/3/eaar5100/DC1
fig. S1. XPS results of oxidized and reduced samples.
fig. S2. Thermogravimetric analysis (TGA) of reduced samples and the conductivities of reduced samples.
fig. S3. The best configurations for chemisorption of CO2 on the system surface and the corresponding differential charge density.
fig. S4. Different configurations for chemisorption of CO2 on the M/LCO (001) system surface.
fig. S5. The configurations for chemisorption of CO2 on the defected M/LCO (001) system surface and the corresponding molecular dynamics calculation.
fig. S6. SEM image for the single cells.
fig. S7. The ac impedance spectra of the electrolyzers.
fig. S8. The ac impedance spectra of the electrolyzers for single solid oxide cells.
fig. S9. The long-term and redox cycling performance of the symmetric cell.
table S1. The adsorption energies, bond distances, and bond angles after CO2 adsorption, and charge analysis of a partial system.
A New Class of Optimum Importance Sampling Strategies Derived from Statistical Distance Measures

Summary
Performance analysis of discrete-time systems often requires the evaluation of the expected value of a cost function of the system output or, equivalently, the expected value of a functional of the system input vector. In either case, analytical expressions for the performance in many practical situations are typically unavailable. As an alternative to approximations or bounds, Monte Carlo simulations are often employed as a convergent method for obtaining arbitrarily accurate estimates of the performance. Unfortunately, in many important applications, the required computations can be prohibitive. Therefore, much recent research has focused on the development of new and efficient forms of the Monte Carlo method known as Importance Sampling simulations. The fundamental problem in Importance Sampling is to determine the appropriate statistics for the simulation. It is well known that the minimum variance statistics cannot be implemented, since they require explicit knowledge of the expectation one is attempting to estimate. Therefore, researchers have worked to determine "good" suboptimal statistics from probability measure constraint classes which exclude the degenerate optimal solution. These so-called "biasing strategies" are typically obtained by minimizing the variance of the Importance Sampling estimator with respect to the biasing statistics over a class of probability measures which excludes the optimal distribution. Recently, it was shown by the authors that minimizing the Importance Sampling variance is equivalent to minimizing an Ali-Silvey distance, or equivalently an f-divergence, between the admissible biasing densities and the well-known optimal biasing density. This result has led to a new approach to the design of Importance Sampling strategies in which one merely determines the biasing density from an arbitrary constraint class with minimum "distance" to the globally optimal distribution in order to minimize the simulation variance. Extending this previous research, we derive in this work the minimum variance biasing distribution from a constraint class whose controlling parameter is fundamental in the performance analysis of Importance Sampling. In addition, we show that for the special case of estimating the probability of rare events, the constrained optimal biasing distribution from this class is independent of the unknown parameter and, as such, leads to solutions which are both amenable to implementation and yet still optimal with respect to a relevant criterion. To motivate the proposed constraint class, we note that it
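In standard notation (a sketch of the setup described above, not taken verbatim from the paper): to estimate P = E_p[φ(X)], Importance Sampling draws samples from a biasing density q and corrects with the likelihood ratio,

\[ \hat{P}_N = \frac{1}{N} \sum_{i=1}^{N} \varphi(X_i)\, \frac{p(X_i)}{q(X_i)}, \qquad X_i \sim q, \]

and for a rare-event probability, φ = 1_A, the variance-minimizing (degenerate) choice is

\[ q^{*}(x) = \frac{\mathbf{1}_A(x)\, p(x)}{P}, \qquad P = \Pr_p(X \in A), \]

which cannot be simulated from directly because it requires the unknown P — exactly the obstruction the summary describes.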
<reponame>bieganski/esp-idf // Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /* * All the APIs declared here are internal only APIs, it can only be used by * espressif internal modules, such as SSC, LWIP, TCPIP adapter etc, espressif * customers are not recommended to use them. * * If someone really want to use specified APIs declared in here, please contact * espressif AE/developer to make sure you know the limitations or risk of * the API, otherwise you may get unexpected behavior!!! * */ #ifndef __ESP_WIFI_INTERNAL_H__ #define __ESP_WIFI_INTERNAL_H__ #include <stdint.h> #include <stdbool.h> #include "freertos/FreeRTOS.h" #include "freertos/queue.h" #include "sys/queue.h" #include "esp_err.h" #include "esp_wifi_types.h" #include "esp_event.h" #include "esp_wifi.h" #include "esp_smartconfig.h" #include "wifi_types.h" #ifdef __cplusplus extern "C" { #endif typedef struct { QueueHandle_t handle; /**< FreeRTOS queue handler */ void *storage; /**< storage for FreeRTOS queue */ } wifi_static_queue_t; /** * @brief WiFi log level * */ typedef enum { WIFI_LOG_NONE = 0, WIFI_LOG_ERROR , /*enabled by default*/ WIFI_LOG_WARNING, /*enabled by default*/ WIFI_LOG_INFO, /*enabled by default*/ WIFI_LOG_DEBUG, /*can be set in menuconfig*/ WIFI_LOG_VERBOSE, /*can be set in menuconfig*/ } wifi_log_level_t; /** * @brief WiFi log module definition * */ typedef enum { WIFI_LOG_MODULE_ALL = 0, /*all log modules */ WIFI_LOG_MODULE_WIFI, /*logs related to WiFi*/ WIFI_LOG_MODULE_COEX, /*logs related to WiFi and BT(or BLE) coexist*/ WIFI_LOG_MODULE_MESH, /*logs related to Mesh*/ } wifi_log_module_t; /** * @brief FTM Report log levels configuration * */ typedef struct { uint8_t show_rtt:1; /**< Display all valid Round-Trip-Time readings for FTM frames */ uint8_t show_diag:1; /**< Display dialogue tokens for all FTM frames with valid readings */ uint8_t show_t1t2t3t4:1;/**< Display all valid T1, T2, T3, T4 readings considered while calculating RTT */ uint8_t show_rxrssi:1; /**< Display RSSI for each FTM frame with valid readings */ } ftm_report_log_level_t; /** * @brief WiFi log submodule definition * */ #define WIFI_LOG_SUBMODULE_ALL (0) /*all log submodules*/ #define WIFI_LOG_SUBMODULE_INIT (1) /*logs related to initialization*/ #define WIFI_LOG_SUBMODULE_IOCTL (1<<1) /*logs related to API calling*/ #define WIFI_LOG_SUBMODULE_CONN (1<<2) /*logs related to connecting*/ #define WIFI_LOG_SUBMODULE_SCAN (1<<3) /*logs related to scaning*/ /** * @brief Initialize Wi-Fi Driver * Alloc resource for WiFi driver, such as WiFi control structure, RX/TX buffer, * WiFi NVS structure among others. * * For the most part, you need not call this function directly. It gets called * from esp_wifi_init(). * * This function may be called, if you only need to initialize the Wi-Fi driver * without having to use the network stack on top. 
* * @param config provide WiFi init configuration * * @return * - ESP_OK: succeed * - ESP_ERR_NO_MEM: out of memory * - others: refer to error code esp_err.h */ esp_err_t esp_wifi_init_internal(const wifi_init_config_t *config); /** * @brief Deinitialize Wi-Fi Driver * Free resource for WiFi driver, such as WiFi control structure, RX/TX buffer, * WiFi NVS structure among others. * * For the most part, you need not call this function directly. It gets called * from esp_wifi_deinit(). * * This function may be called, if you call esp_wifi_init_internal to initialize * WiFi driver. * * @return * - ESP_OK: succeed * - others: refer to error code esp_err.h */ esp_err_t esp_wifi_deinit_internal(void); /** * @brief free the rx buffer which allocated by wifi driver * * @param void* buffer: rx buffer pointer */ void esp_wifi_internal_free_rx_buffer(void* buffer); /** * @brief transmit the buffer via wifi driver * * This API makes a copy of the input buffer and then forwards the buffer * copy to WiFi driver. * * @param wifi_interface_t wifi_if : wifi interface id * @param void *buffer : the buffer to be transmitted * @param uint16_t len : the length of buffer * * @return * - ESP_OK : Successfully transmit the buffer to wifi driver * - ESP_ERR_NO_MEM: out of memory * - ESP_ERR_WIFI_ARG: invalid argument * - ESP_ERR_WIFI_IF : WiFi interface is invalid * - ESP_ERR_WIFI_CONN : WiFi interface is not created, e.g. send the data to STA while WiFi mode is AP mode * - ESP_ERR_WIFI_NOT_STARTED : WiFi is not started * - ESP_ERR_WIFI_STATE : WiFi internal state is not ready, e.g. WiFi is not started * - ESP_ERR_WIFI_NOT_ASSOC : WiFi is not associated * - ESP_ERR_WIFI_TX_DISALLOW : WiFi TX is disallowed, e.g. WiFi hasn't passed the authentication * - ESP_ERR_WIFI_POST : caller fails to post event to WiFi task */ int esp_wifi_internal_tx(wifi_interface_t wifi_if, void *buffer, uint16_t len); /** * @brief The net stack buffer reference counter callback function * */ typedef void (*wifi_netstack_buf_ref_cb_t)(void *netstack_buf); /** * @brief The net stack buffer free callback function * */ typedef void (*wifi_netstack_buf_free_cb_t)(void *netstack_buf); /** * @brief transmit the buffer by reference via wifi driver * * This API firstly increases the reference counter of the input buffer and * then forwards the buffer to WiFi driver. The WiFi driver will free the buffer * after processing it. Use esp_wifi_internal_tx() if the upper-layer buffer doesn't * support reference counting. * * @param wifi_if : wifi interface id * @param buffer : the buffer to be transmitted * @param len : the length of buffer * @param netstack_buf : the netstack buffer related to the buffer * * @return * - ESP_OK : Successfully transmit the buffer to wifi driver * - ESP_ERR_NO_MEM: out of memory * - ESP_ERR_WIFI_ARG: invalid argument * - ESP_ERR_WIFI_IF : WiFi interface is invalid * - ESP_ERR_WIFI_CONN : WiFi interface is not created, e.g. send the data to STA while WiFi mode is AP mode * - ESP_ERR_WIFI_NOT_STARTED : WiFi is not started * - ESP_ERR_WIFI_STATE : WiFi internal state is not ready, e.g. WiFi is not started * - ESP_ERR_WIFI_NOT_ASSOC : WiFi is not associated * - ESP_ERR_WIFI_TX_DISALLOW : WiFi TX is disallowed, e.g. WiFi hasn't passed the authentication * - ESP_ERR_WIFI_POST : caller fails to post event to WiFi task */ esp_err_t esp_wifi_internal_tx_by_ref(wifi_interface_t ifx, void *buffer, size_t len, void *netstack_buf); /** * @brief Initialize WAPI function when wpa_supplicant initialize.
* * This API is privately used; be careful not to open it to external applications. * * @return * - ESP_OK : succeed * - ESP_ERR_WAPI_INTERNAL : Internal error */ esp_err_t esp_wifi_internal_wapi_init(void); /** * @brief De-initialize WAPI function when wpa_supplicant de-initialize. * * This API is privately used; be careful not to open it to external applications. * * @return * - ESP_OK : succeed */ esp_err_t esp_wifi_internal_wapi_deinit(void); /** * @brief register the net stack buffer reference increasing and free callback * * @param ref : net stack buffer reference callback * @param free: net stack buffer free callback * * @return * - ESP_OK : Successfully transmit the buffer to wifi driver * - others : failed to register the callback */ esp_err_t esp_wifi_internal_reg_netstack_buf_cb(wifi_netstack_buf_ref_cb_t ref, wifi_netstack_buf_free_cb_t free); /** * @brief The WiFi RX callback function * * Each time the WiFi driver needs to forward packets to a higher layer, the callback function will be called */ typedef esp_err_t (*wifi_rxcb_t)(void *buffer, uint16_t len, void *eb); /** * @brief Set the WiFi RX callback * * @attention 1. Currently we support only one RX callback for each interface * * @param wifi_interface_t ifx : interface * @param wifi_rxcb_t fn : WiFi RX callback * * @return * - ESP_OK : succeed * - others : fail */ esp_err_t esp_wifi_internal_reg_rxcb(wifi_interface_t ifx, wifi_rxcb_t fn); /** * @brief Notify WIFI driver that the station got ip successfully * * @return * - ESP_OK : succeed * - others : fail */ esp_err_t esp_wifi_internal_set_sta_ip(void); /** * @brief enable or disable transmitting WiFi MAC frame with fixed rate * * @attention 1. If fixed rate is enabled, both management and data frame are transmitted with fixed rate * @attention 2. Make sure that the receiver is able to receive the frame with the fixed rate if you want the frame to be received * * @param ifx : wifi interface * @param en : false - disable, true - enable * @param rate : PHY rate * * @return * - ESP_OK : succeed * - ESP_ERR_WIFI_NOT_INIT: WiFi is not initialized by esp_wifi_init * - ESP_ERR_WIFI_NOT_STARTED: WiFi was not started by esp_wifi_start * - ESP_ERR_WIFI_IF : invalid WiFi interface * - ESP_ERR_INVALID_ARG : invalid rate * - ESP_ERR_NOT_SUPPORTED : do not support to set fixed rate if TX AMPDU is enabled */ esp_err_t esp_wifi_internal_set_fix_rate(wifi_interface_t ifx, bool en, wifi_phy_rate_t rate); /** * @brief Start SmartConfig, config ESP device to connect AP. You need to broadcast information by phone APP. * The device sniffs special packets from the air that contain the SSID and password of the target AP. * * @attention 1. This API can be called in station or softAP-station mode. * @attention 2. Do not call esp_smartconfig_start twice before it finishes; please call * esp_smartconfig_stop first. * * @param config pointer to smartconfig start configure structure * * @return * - ESP_OK: succeed * - others: fail */ esp_err_t esp_smartconfig_internal_start(const smartconfig_start_config_t *config); /** * @brief Stop SmartConfig, free the buffer taken by esp_smartconfig_start. * * @attention Whether the connection to the AP succeeds or not, this API should be called to free * memory taken by smartconfig_start. * * @return * - ESP_OK: succeed * - others: fail */ esp_err_t esp_smartconfig_internal_stop(void); /** * @brief Check the MD5 values of the OS adapter header files in IDF and WiFi library * * @attention 1.
It is used for internal CI version check * * @return * - ESP_OK : succeed * - ESP_WIFI_INVALID_ARG : MD5 check fail */ esp_err_t esp_wifi_internal_osi_funcs_md5_check(const char *md5); /** * @brief Check the MD5 values of the crypto types header files in IDF and WiFi library * * @attention 1. It is used for internal CI version check * * @return * - ESP_OK : succeed * - ESP_WIFI_INVALID_ARG : MD5 check fail */ esp_err_t esp_wifi_internal_crypto_funcs_md5_check(const char *md5); /** * @brief Check the MD5 values of the esp_wifi_types.h in IDF and WiFi library * * @attention 1. It is used for internal CI version check * * @return * - ESP_OK : succeed * - ESP_WIFI_INVALID_ARG : MD5 check fail */ esp_err_t esp_wifi_internal_wifi_type_md5_check(const char *md5); /** * @brief Check the MD5 values of the esp_wifi.h in IDF and WiFi library * * @attention 1. It is used for internal CI version check * * @return * - ESP_OK : succeed * - ESP_WIFI_INVALID_ARG : MD5 check fail */ esp_err_t esp_wifi_internal_esp_wifi_md5_check(const char *md5); /** * @brief Allocate a chunk of memory for WiFi driver * * @attention This API is not used for DMA memory allocation. * * @param size_t size : Size, in bytes, of the amount of memory to allocate * * @return A pointer to the memory allocated on success, NULL on failure */ void *wifi_malloc( size_t size ); /** * @brief Reallocate a chunk of memory for WiFi driver * * @attention This API is not used for DMA memory allocation. * * @param void * ptr : Pointer to previously allocated memory, or NULL for a new allocation. * @param size_t size : Size, in bytes, of the amount of memory to allocate * * @return A pointer to the memory allocated on success, NULL on failure */ void *wifi_realloc( void *ptr, size_t size ); /** * @brief Callocate memory for WiFi driver * * @attention This API is not used for DMA memory allocation. * * @param size_t n : Number of continuing chunks of memory to allocate * @param size_t size : Size, in bytes, of the amount of memory to allocate * * @return A pointer to the memory allocated on success, NULL on failure */ void *wifi_calloc( size_t n, size_t size ); /** * @brief Update WiFi MAC time * * @param uint32_t time_delta : time duration since the WiFi/BT common clock is disabled * * @return Always returns ESP_OK */ typedef esp_err_t (* wifi_mac_time_update_cb_t)( uint32_t time_delta ); /** * @brief Update WiFi MAC time * * @param uint32_t time_delta : time duration since the WiFi/BT common clock is disabled * * @return Always returns ESP_OK */ esp_err_t esp_wifi_internal_update_mac_time( uint32_t time_delta ); /** * @brief Set current WiFi log level * * @param level Log level. * * @return * - ESP_OK: succeed * - ESP_FAIL: level is invalid */ esp_err_t esp_wifi_internal_set_log_level(wifi_log_level_t level); /** * @brief Set current log module and submodule * * @param module Log module * @param submodule Log submodule * @param enable enable or disable * If module == 0 && enable == 0, all log modules are disabled. * If module == 0 && enable == 1, all log modules are enabled. * If submodule == 0 && enable == 0, all log submodules are disabled. * If submodule == 0 && enable == 1, all log submodules are enabled. * * @return * - ESP_OK: succeed * - ESP_ERR_WIFI_NOT_INIT: WiFi is not initialized by esp_wifi_init * - ESP_ERR_WIFI_ARG: invalid argument */ esp_err_t esp_wifi_internal_set_log_mod(wifi_log_module_t module, uint32_t submodule, bool enable); /** * @brief Get current WiFi log info * * @param log_level the return log level. 
* @param log_mod the return log module and submodule * * @return * - ESP_OK: succeed */ esp_err_t esp_wifi_internal_get_log(wifi_log_level_t *log_level, uint32_t *log_mod); /** * @brief A general API to set/get WiFi internal configuration, it's for debug only * * @param cmd : ioctl command type * @param cfg : configuration for the command * * @return * - ESP_OK: succeed * - others: failed */ esp_err_t esp_wifi_internal_ioctl(int cmd, wifi_ioctl_config_t *cfg); /** * @brief Get the user-configured channel info * * @param ifx : WiFi interface * @param primary : store the configured primary channel * @param second : store the configured second channel * * @return * - ESP_OK: succeed */ esp_err_t esp_wifi_internal_get_config_channel(wifi_interface_t ifx, uint8_t *primary, uint8_t *second); /** * @brief Get the negotiated channel info after WiFi connection established * * @param ifx : WiFi interface * @param aid : the connection number when a STA connects to the softAP * @param primary : store the negotiated primary channel * @param second : store the negotiated second channel * @attention the aid param is only works when the ESP32 in softAP/softAP+STA mode * * @return * - ESP_OK: succeed */ esp_err_t esp_wifi_internal_get_negotiated_channel(wifi_interface_t ifx, uint8_t aid, uint8_t *primary, uint8_t *second); /** * @brief Get the negotiated bandwidth info after WiFi connection established * * @param ifx : WiFi interface * @param bw : store the negotiated bandwidth * * @return * - ESP_OK: succeed */ esp_err_t esp_wifi_internal_get_negotiated_bandwidth(wifi_interface_t ifx, uint8_t aid, uint8_t *bw); #if SOC_WIFI_HW_TSF /** * @brief Check if WiFi TSF is active * * @return * - true: Active * - false: Not active */ bool esp_wifi_internal_is_tsf_active(void); /** * @brief Update WIFI light sleep wake ahead time * */ void esp_wifi_internal_update_light_sleep_wake_ahead_time(uint32_t); #endif #if CONFIG_MAC_BB_PD /** * @brief Enable or disable powering down MAC and baseband when Wi-Fi is sleeping. * * @param enable : enable or disable * * @return * - ESP_OK: succeed */ esp_err_t esp_wifi_internal_set_mac_sleep(bool enable); /** * @brief mac bb sleep. */ void pm_mac_sleep(void); /** * @brief mac bb wakeup. */ void pm_mac_wakeup(void); #endif /** * @breif TxDone callback function type. 
Should be registered using esp_wifi_set_tx_done_cb() * * @param ifidx The interface id that the tx callback has been triggered from * @param data Pointer to the data transmitted * @param data_len Length of the data transmitted * @param txStatus True: if the data was transmitted successfully; False: if data transmission failed */ typedef void (* wifi_tx_done_cb_t)(uint8_t ifidx, uint8_t *data, uint16_t *data_len, bool txStatus); /** * @brief Register the txDone callback function of type wifi_tx_done_cb_t * * @param cb The callback function * * @return * - ESP_OK: succeed * - ESP_ERR_WIFI_NOT_INIT: WiFi is not initialized by esp_wifi_init * - ESP_ERR_WIFI_NOT_STARTED: WiFi is not started by esp_wifi_start */ esp_err_t esp_wifi_set_tx_done_cb(wifi_tx_done_cb_t cb); /** * @brief Set device spp amsdu attributes * * @param ifx: WiFi interface * @param spp_cap: spp amsdu capable * @param spp_req: spp amsdu require * * @return * - ESP_OK: succeed * - ESP_ERR_WIFI_NOT_INIT: WiFi is not initialized by esp_wifi_init * - ESP_ERR_WIFI_IF : invalid WiFi interface */ esp_err_t esp_wifi_internal_set_spp_amsdu(wifi_interface_t ifidx, bool spp_cap, bool spp_req); /** * @brief Apply WiFi sleep optimization parameters * */ void esp_wifi_internal_optimize_wake_ahead_time(void); /** * @brief Set FTM Report log level * * @param log_lvl Log levels configuration * * @return * - ESP_OK: succeed * - ESP_ERR_NOT_SUPPORTED: No FTM support */ esp_err_t esp_wifi_set_ftm_report_log_level(ftm_report_log_level_t *log_lvl); #ifdef __cplusplus } #endif #endif /* __ESP_WIFI_INTERNAL_H__ */
Effects of Coconut Juice on the Formation of Hyperlipidemia and Atherosclerosis

Eight-week-old male quails were fed a high-fat feed supplemented with 20 milliliters of coconut juice daily for each bird for 12 weeks. Results showed that coconut juice increased serum high-density lipoprotein cholesterol levels by 46.2%, reduced the index of atherosclerosis by 41.1%, and reduced liver total cholesterol levels by 26.3% in the quails, all with a P-value of less than 0.01, as compared with those given high-fat feed only. Coconut juice obviously inhibited the formation of atherosclerosis in the animals. Coconut juice, as a natural and nutritious health-promoting drink, may play a role in preventing hyperlipidemia and atherosclerosis at the dose used in these animal experiments.
/* Copyright 2017-2019 Siemens AG Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. Author(s): <NAME>, <NAME> */ package main import ( "CaRRoT/jenks" "encoding/binary" "strings" //"fmt" //"io/ioutil" "math" //"runtime" "sort" "strconv" "sync" ) var waitFor sync.WaitGroup func ScanInts(b []byte, hash string) { L.Println("scanning for ints") // create string/pos buffer instring := make([]bool, db.Files[hash].Size) db.Files[hash].Stringz.m.Lock() for _, el := range db.Files[hash].Stringz.S { for i := el.Pos; i < el.Pos+el.Len; i++ { instring[i] = true } } db.Files[hash].Stringz.m.Unlock() // try to find and interpret values for i := uint64(0); i < db.Files[hash].Size; i++ { if i < db.Files[hash].Size-2 { if !instring[i] && !instring[i+1] { waitFor.Add(1) go checkInt(b[i:], i, 2, hash) } } if i < db.Files[hash].Size-4 { if !instring[i] && !instring[i+1] && !instring[i+2] && !instring[i+3] { waitFor.Add(1) go checkInt(b[i:], i, 4, hash) } } if i < db.Files[hash].Size-8 { if !instring[i] && !instring[i+1] && !instring[i+2] && !instring[i+3] && !instring[i+4] && !instring[i+5] && !instring[i+6] && !instring[i+7] { waitFor.Add(1) go checkInt(b[i:], i, 8, hash) } } } waitFor.Wait() // posis := make([]uint64, len(db.Files[hash].Intz.I)) // for i, el := range db.Files[hash].Intz.I { // posis[i] = el.Pos // } // str := fmt.Sprint(posis) // ioutil.WriteFile("positions.csv", []byte(str), 0644) // TODO deduplicate sls := NewSliceSorter(db.Files[hash].Intz.I) if *fIntDedup { D.Println("starting int map dedup") belm := make([][]uint64, db.Files[hash].Size) newints := make([]Int, 0) lofints := len(db.Files[hash].Intz.I) for _, j := range sls.W64().All() { for i := j.Pos; i < j.Pos+j.ByteW; i++ { belm[i] = append(belm[i], j.ValU) } nj := j newints = append(newints, nj) } for _, j := range sls.W32().All() { blocked := false for _, k := range belm[j.Pos] { if k == j.ValU { blocked = true } } if !blocked { for i := j.Pos; i < j.Pos+j.ByteW; i++ { belm[i] = append(belm[i], j.ValU) } nj := j newints = append(newints, nj) } } for _, j := range sls.W16().All() { blocked := false for _, k := range belm[j.Pos] { if k == j.ValU { blocked = true } } if !blocked { for i := j.Pos; i < j.Pos+j.ByteW; i++ { belm[i] = append(belm[i], j.ValU) } nj := j newints = append(newints, nj) } } db.Files[hash].Intz.I = newints D.Println("finished int map dedup. 
reduced ints - before", (lofints), "after", (len(newints))) } D.Println("starting int sort") sort.Slice(db.Files[hash].Intz.I, func(i, j int) bool { return db.Files[hash].Intz.I[i].Pos < db.Files[hash].Intz.I[j].Pos }) D.Println("finished int sort") } type SliceSorter struct { ar []Int } func NewSliceSorter(i []Int) SliceSorter { return SliceSorter{ar: i} } func (s SliceSorter) All() (r []Int) { return s.ar } func (s SliceSorter) Big() (ns SliceSorter) { for _, i := range s.ar { if strings.HasSuffix(i.Enc, "be") { newi := i ns.ar = append(ns.ar, newi) } } return } func (s SliceSorter) Little() (ns SliceSorter) { for _, i := range s.ar { if strings.HasSuffix(i.Enc, "le") { newi := i ns.ar = append(ns.ar, newi) } } return } func (s SliceSorter) W16() (ns SliceSorter) { for _, i := range s.ar { if i.ByteW == 2 { newi := i ns.ar = append(ns.ar, newi) } } return } func (s SliceSorter) W32() (ns SliceSorter) { for _, i := range s.ar { if i.ByteW == 4 { newi := i ns.ar = append(ns.ar, newi) } } return } func (s SliceSorter) W64() (ns SliceSorter) { for _, i := range s.ar { if i.ByteW == 8 { newi := i ns.ar = append(ns.ar, newi) } } return } func checkInt(bf []byte, pos uint64, w int, hash string) { defer waitFor.Done() plausibleSigned := func(i int64) bool { if i == 0 { return false } if i < -10 { return false } if i < 0 { return true } if i == 256 { return false } if uint64(i) > db.Files[hash].Size { return false } return true } plausibleUnsigned := func(i uint64) bool { if i == 0 { return false } if i == 256 { return false } if i > db.Files[hash].Size { return false } return true } var sdata int64 var udata uint64 if w == 2 { udata = uint64(binary.BigEndian.Uint16(bf)) sdata = int64(int16(binary.BigEndian.Uint16(bf))) if plausibleUnsigned(udata) { db.Files[hash].Intz.Add(Int{Enc: "uint16be", ByteW: 2, Pos: pos, ValS: sdata, ValU: udata}) } if sdata < 0 || udata != uint64(sdata) { if plausibleSigned(sdata) { db.Files[hash].Intz.Add(Int{Enc: "int16be", ByteW: 2, Pos: pos, ValS: sdata, ValU: udata}) } } udata = uint64(binary.LittleEndian.Uint16(bf)) sdata = int64(int16(binary.LittleEndian.Uint16(bf))) if plausibleUnsigned(udata) { db.Files[hash].Intz.Add(Int{Enc: "uint16le", ByteW: 2, Pos: pos, ValS: sdata, ValU: udata}) } if sdata < 0 || udata != uint64(sdata) { if plausibleSigned(sdata) { db.Files[hash].Intz.Add(Int{Enc: "int16le", ByteW: 2, Pos: pos, ValS: sdata, ValU: udata}) } } } else if w == 4 { udata = uint64(binary.BigEndian.Uint32(bf)) sdata = int64(int32(binary.BigEndian.Uint32(bf))) if plausibleUnsigned(udata) { db.Files[hash].Intz.Add(Int{Enc: "uint32be", ByteW: 4, Pos: pos, ValS: sdata, ValU: udata}) } if sdata < 0 || udata != uint64(sdata) { if plausibleSigned(sdata) { db.Files[hash].Intz.Add(Int{Enc: "int32be", ByteW: 4, Pos: pos, ValS: sdata, ValU: udata}) } } udata = uint64(binary.LittleEndian.Uint32(bf)) sdata = int64(int32(binary.LittleEndian.Uint32(bf))) if plausibleUnsigned(udata) { db.Files[hash].Intz.Add(Int{Enc: "uint32le", ByteW: 4, Pos: pos, ValS: sdata, ValU: udata}) } if sdata < 0 || udata != uint64(sdata) { if plausibleSigned(sdata) { db.Files[hash].Intz.Add(Int{Enc: "int32le", ByteW: 4, Pos: pos, ValS: sdata, ValU: udata}) } } } else if w == 8 { udata = binary.BigEndian.Uint64(bf) sdata = int64(binary.BigEndian.Uint64(bf)) if plausibleUnsigned(udata) { db.Files[hash].Intz.Add(Int{Enc: "uint64be", ByteW: 8, Pos: pos, ValS: sdata, ValU: udata}) } if sdata < 0 || udata != uint64(sdata) { if plausibleSigned(sdata) { db.Files[hash].Intz.Add(Int{Enc: "int64be", ByteW: 8, 
Pos: pos, ValS: sdata, ValU: udata}) } } udata = binary.LittleEndian.Uint64(bf) sdata = int64(binary.LittleEndian.Uint64(bf)) if plausibleUnsigned(udata) { db.Files[hash].Intz.Add(Int{Enc: "uint64le", ByteW: 8, Pos: pos, ValS: sdata, ValU: udata}) } if sdata < 0 || udata != uint64(sdata) { if plausibleSigned(sdata) { db.Files[hash].Intz.Add(Int{Enc: "int64le", ByteW: 8, Pos: pos, ValS: sdata, ValU: udata}) } } } } func GroupInts(hash string, groupruns int, scaler float64) []struct { score float64 breaks [][2]int grcnt int } { L.Println("grouping ints") D.Println("starting jenks grouping") var m sync.Mutex var waitjenks sync.WaitGroup var groupings []struct { score float64 breaks [][2]int grcnt int } // var iters []int = []int{100000, 50000, 10000, 5000, 1000, 500, 100} // scale group count down to order of magnitude of len(intarray) - 10^3 lofa := len(db.Files[hash].Intz.I) Olofa := len(strconv.Itoa(lofa)) if Olofa <= 1 { m.Lock() groupings = append(groupings, struct { score float64 breaks [][2]int grcnt int }{0.1, [][2]int{{0, lofa - 1}}, 1}) m.Unlock() D.Println("jenks group scaling will not run, because order of length is too small") return groupings } D.Println("jenks group scaling:", lofa, Olofa, int(math.Pow(10, float64(Olofa-1)))) // 0.90^x for x in [0..15] will get us down to 20% of the original group count for threads := 0; threads < groupruns; threads++ { waitjenks.Add(1) go func(index int) { defer waitjenks.Done() //groups := int(math.Pow(10, float64(Olofa-index-1))) groups := int(math.Pow(10, float64(Olofa-1)) * math.Pow(scaler, float64(index))) D.Println("building jenks worker with", groups) // https://github.com/golang/go/wiki/InterfaceSlice var groupableSlice []jenks.Groupable = make([]jenks.Groupable, len(db.Files[hash].Intz.I)) for i, d := range db.Files[hash].Intz.I { groupableSlice[i] = d } D.Println("executing jenks worker with", groups) rv := jenks.JenksGroup(groupableSlice, groups) sc := jenks.ScoreJenksGroup(groupableSlice, rv) //D.Println(groups, "groups scored as", sc, "containing", len(rv)) m.Lock() groupings = append(groupings, struct { score float64 breaks [][2]int grcnt int }{sc, rv, groups}) m.Unlock() // posis := make([]uint64, len(rv)) // for i, el := range rv { // posis[i] = uint64(el[1]) // } // str := fmt.Sprint(posis) // ioutil.WriteFile("breaks"+strconv.Itoa(groups)+".csv", []byte(str), 0644) }(threads) } waitjenks.Wait() sort.Slice(groupings, func(i, j int) bool { return groupings[i].score < groupings[j].score }) D.Println("finished jenks grouping") return groupings }
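The checkInt probe above boils down to: decode the bytes at an offset in both byte orders, reinterpret as signed, and keep only values that pass a plausibility filter. Below is a minimal, self-contained Go sketch of the width-2 case; the thresholds mirror (but simplify) the heuristics in this file, and it is an illustration, not the scanner itself.

package main

import (
	"encoding/binary"
	"fmt"
)

// plausible mirrors the unsigned heuristic in checkInt: reject zero, the
// over-common value 256, and anything larger than the file itself.
func plausible(v uint64, fileSize uint64) bool {
	return v != 0 && v != 256 && v <= fileSize
}

func main() {
	buf := []byte{0x00, 0x2A} // 42 big-endian; 10752 little-endian
	fileSize := uint64(1024)

	be := uint64(binary.BigEndian.Uint16(buf))
	le := uint64(binary.LittleEndian.Uint16(buf))
	sbe := int64(int16(binary.BigEndian.Uint16(buf))) // signed reinterpretation

	if plausible(be, fileSize) {
		fmt.Println("uint16be candidate:", be)
	}
	if plausible(le, fileSize) {
		fmt.Println("uint16le candidate:", le) // filtered out here: 10752 > 1024
	}
	_ = sbe // checkInt additionally records a signed reading when it differs
}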
Development and Validation of Information Technology Mentor Teacher Attitude Scale: A Pilot Study. Accepted: 19.02.2015 The aim of this study is the development and validation of a teacher attitude scale toward Information Technology Mentor Teachers (ITMTs). ITMTs give technological support to other teachers for the integration of technology into their lessons. In the literature, many instruments have been developed to measure teachers' attitudes toward technological tools and instructional technology, but almost none have been proposed to collect information about attitudes toward ICT coordinators. Teachers' perceptions matter because ICT support can play a crucial role both in developing positive teacher attitudes toward technology and in the successful implementation of technology in curricula. Most schools have expensive technological equipment, yet many teachers have been frustrated by a lack of support in integrating these technologies into their classrooms. Accordingly, to evaluate teachers' attitudes toward information technology mentor teachers, an attitude scale with 21 items was prepared by considering the related literature, expert opinions, and interview results. The scale was administered to 40 teachers from a public elementary school. Based on the factor and reliability analyses, the teacher attitude scale toward ITMTs was reduced to 17 items across three factors. Coefficient alpha values for the three subscales (confidence, willingness, and efficacy) were .88, .80, and .87, respectively.
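For reference, the subscale reliabilities reported above are Cronbach's coefficient alpha, computed as alpha = k/(k-1) * (1 - sum of item variances / variance of total scores). The sketch below is a generic illustration of that standard formula on toy data; it is not code or data from the study.

package main

import "fmt"

// cronbachAlpha computes coefficient alpha for a respondents-by-items matrix.
func cronbachAlpha(scores [][]float64) float64 {
	n := len(scores)    // respondents
	k := len(scores[0]) // items
	variance := func(xs []float64) float64 {
		var mean, ss float64
		for _, x := range xs {
			mean += x
		}
		mean /= float64(len(xs))
		for _, x := range xs {
			ss += (x - mean) * (x - mean)
		}
		return ss / float64(len(xs)-1) // sample variance
	}
	var itemVarSum float64
	for j := 0; j < k; j++ {
		col := make([]float64, n)
		for i := 0; i < n; i++ {
			col[i] = scores[i][j]
		}
		itemVarSum += variance(col)
	}
	totals := make([]float64, n)
	for i := 0; i < n; i++ {
		for j := 0; j < k; j++ {
			totals[i] += scores[i][j]
		}
	}
	return float64(k) / float64(k-1) * (1 - itemVarSum/variance(totals))
}

func main() {
	// Toy data: 4 respondents, 3 items on a 5-point scale.
	fmt.Printf("%.2f\n", cronbachAlpha([][]float64{
		{4, 5, 4}, {2, 3, 2}, {5, 5, 4}, {3, 3, 3},
	}))
}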
import java.util.Optional;
import java.util.function.Predicate;
import java.util.stream.Stream;

/**
 * @author <NAME> <<EMAIL>>
 */
public class TestThisStuff {

    public static final int CONSTANT = 10;

    static <T> Stream<T> dropWhile(Predicate<T> p, Stream<T> s) {
        class MutableBoolean { boolean b; }
        MutableBoolean inTail = new MutableBoolean();
        return s.filter(i -> inTail.b || !p.test(i) && (inTail.b = true));
    }

    public Integer calculate() {
        Integer i = null;
        i.toString();
        String nullString;
        if (i != null) {
            Optional.ofNullable("String");
        }
        if (i == null) {
            System.out.println("should be found");
        }
        return null;
    }
}
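The dropWhile helper above smuggles a mutable "have we reached the tail yet?" flag into a stream predicate, since Java 8 streams lack dropWhile. The same semantics over a slice, as a self-contained Go sketch for comparison:

package main

import "fmt"

// dropWhile returns the suffix of s starting at the first element for which
// p is false — the behavior the MutableBoolean trick above emulates.
func dropWhile[T any](p func(T) bool, s []T) []T {
	for i, v := range s {
		if !p(v) {
			return s[i:]
		}
	}
	return nil
}

func main() {
	fmt.Println(dropWhile(func(n int) bool { return n < 3 }, []int{1, 2, 3, 1, 2}))
	// [3 1 2] — later small values are kept once the tail has started
}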
/**
 * EVE Swagger Interface
 * An OpenAPI for EVE Online
 *
 * OpenAPI spec version: 0.7.5
 *
 *
 * NOTE: This class is auto generated by the swagger code generator program.
 * https://github.com/swagger-api/swagger-codegen.git
 * Do not edit the class manually.
 */

/**
 * modifier object
 */
export interface GetDogmaEffectsEffectIdModifier {
    /**
     * func string
     */
    func: string;
    /**
     * domain string
     */
    domain?: string;
    /**
     * modified_attribute_id integer
     */
    modifiedAttributeId?: number;
    /**
     * modifying_attribute_id integer
     */
    modifyingAttributeId?: number;
    /**
     * effect_id integer
     */
    effectId?: number;
    /**
     * operator integer
     */
    operator?: number;
}
/** * Create an ArmContainer from your own group and robot model object. * Note -- this takes ownership of the RobotModel pointer. See example * "create" functions above for example usage. */ ArmContainer(std::shared_ptr<Group> group, std::unique_ptr<robot_model::RobotModel> robot_model) : group_(group), robot_model_(std::move(robot_model)) { masses_.resize(robot_model_->getFrameCount(robot_model::FrameType::CenterOfMass)); robot_model_->getMasses(masses_); }
/** * An event posted when a thing is yoinked from a source. */ public static class YoinkDepartureEvent extends Event<YoinkDeparture> { YoinkDepartureEvent(Thing yoinkedThing, Thing source) { super(null, source, new YoinkDeparture(yoinkedThing)); } }
/** * Copyright (c) Microsoft Corporation. All rights reserved. * Licensed under the MIT License. */ #include "blingfire-compile_src_pch.h" #include "FALexBreaker.h" #include "FAUtf8Utils.h" #include "FAUtils_cl.h" namespace BlingFire { FALexBreaker::FALexBreaker() : m_fInitialized (false), m_fUseBytes (false), m_iXWordTag (0), m_iSegTag (0), m_iIgnoreTag (0), m_pbUTF8Text (NULL), m_cbUTF8Text (0), m_pTagSet (NULL), m_pOutText (NULL), m_pOutNormText (NULL), m_pNorm (NULL), m_uUtf8Offset (0), m_uTxtOffset (0), m_uTxtLen (0), m_iTokensSize (0), m_uMaxCharsAtOnce (0), m_uMaxTokensSize (0) { } FALexBreaker::~FALexBreaker () { Clear (); } void FALexBreaker::Clear () { m_fInitialized = false; m_fUseBytes = false; m_iXWordTag = 0; m_iSegTag = 0; m_iIgnoreTag = 0; m_pbUTF8Text = NULL; m_cbUTF8Text = 0; m_pTagSet = NULL; m_pOutText = NULL; m_pOutNormText = NULL; m_pNorm = NULL; m_uUtf8Offset = 0; m_uTxtOffset = 0; m_uTxtLen = 0; m_iTokensSize = 0; m_uMaxCharsAtOnce = 0; m_uMaxTokensSize = 0; } bool FALexBreaker::Initialize (const FAWbdConfKeeper * pConfig) { if (m_fInitialized) { return false; } /// bad input parameters LogAssert(pConfig); /// return into the initial state FALexBreaker::Clear (); /// setup WBD configuration m_wbd.SetConf (pConfig); // get tag values code should know about m_iXWordTag = pConfig->GetWbdTagXWord (); m_iSegTag = pConfig->GetWbdTagSeg (); m_iIgnoreTag = pConfig->GetWbdTagIgnore (); // get normalization map pointer, can be NULL m_pNorm = pConfig->GetCharMap (); /// last check, fails now or never int Input [1] = { 0x20 }; int Output [3]; m_wbd.Process (Input, 1, Output, 3); m_fInitialized = true; return true; } void FALexBreaker::SetOutText (FAArray_cont_t < char > * pOutText) { m_pOutText = pOutText; } void FALexBreaker::SetOutNormText (FAArray_cont_t < char > * pOutNormText) { m_pOutNormText = pOutNormText; } void FALexBreaker::SetTagSet (const FATagSet * pTagSet) { m_pTagSet = pTagSet; } void FALexBreaker::SetUseBytes (const bool fUseBytes) { m_fUseBytes = fUseBytes; } void FALexBreaker::DeInitialize () { Clear (); } bool FALexBreaker::BreakText (const UInt8 *pbUTF8Text, size_t cbUTF8Text) { LogAssert (m_fInitialized); LogAssert (pbUTF8Text || 0 == cbUTF8Text); LogAssert (cbUTF8Text < INT_MAX); if (0 == cbUTF8Text) { return true; } m_uMaxCharsAtOnce = (unsigned) cbUTF8Text; if (MaxBuffSize < m_uMaxCharsAtOnce) { m_uMaxCharsAtOnce = MaxBuffSize; } m_uMaxTokensSize = 3 * m_uMaxCharsAtOnce; m_pbUTF8Text = pbUTF8Text; m_cbUTF8Text = cbUTF8Text; return BreakText_int (pbUTF8Text, cbUTF8Text); } bool FALexBreaker::BreakText_int (const UInt8 *pbUTF8Text, size_t cbUTF8Text) { m_uUtf8Offset = 0; size_t FromPos = 0; size_t LastToPos = FromPos; if (m_uMaxCharsAtOnce < cbUTF8Text) { for (size_t ToPos = 0; ToPos < cbUTF8Text; ++ToPos) { // get the text length size_t TxtLen = ToPos - FromPos + 1; // word-break the text from the FromPos to LastToPos, because // current TxtLen is already too big to fit the buffer if (m_uMaxCharsAtOnce < TxtLen) { // no LastToPos was ever found, just take next m_uMaxCharsAtOnce if (LastToPos == FromPos) { LastToPos = FromPos + m_uMaxCharsAtOnce - 1; } // get current substring const UInt8 * pbSubStr = pbUTF8Text + FromPos; const size_t cbSubStrLen = LastToPos - FromPos + 1; m_uUtf8Offset = FromPos; FromPos = LastToPos + 1; LastToPos = FromPos; int Count = -1; if (false == m_fUseBytes) { // UTF-8 --> UTF-32 with offsets Count = FAStrUtf8ToArray ((const char*)pbSubStr, (int)cbSubStrLen, m_pTxt, m_pOffsets, m_uMaxCharsAtOnce); } else { // copy the bytes as 
is for (size_t i = 0; i < cbSubStrLen; ++i) { DebugLogAssert (i < m_uMaxCharsAtOnce); m_pTxt [i] = pbSubStr [i]; m_pOffsets [i] = (int) i; } Count = (int) cbSubStrLen; } if (0 > Count) { return false; } DebugLogAssert (m_uMaxCharsAtOnce >= (unsigned) Count); m_uTxtLen = Count; // process a buffer of UTF-32LE text ProcessTxt (); } // check for a good stop place else if (0x20 >= pbUTF8Text [ToPos]) { // remember biggest ToPos LastToPos = ToPos; } } } if (FromPos < cbUTF8Text) { m_uUtf8Offset = FromPos; // get current substring const UInt8 * pbSubStr = pbUTF8Text + FromPos; const size_t cbSubStrLen = cbUTF8Text - FromPos; // UTF-8 --> UTF-32 with offsets int Count = -1; if (false == m_fUseBytes) { // UTF-8 --> UTF-32 with offsets Count = FAStrUtf8ToArray ((const char*)pbSubStr, (int)cbSubStrLen, m_pTxt, m_pOffsets, m_uMaxCharsAtOnce); } else { // copy the bytes as is for (size_t i = 0; i < cbSubStrLen; ++i) { DebugLogAssert (i < m_uMaxCharsAtOnce); m_pTxt [i] = pbSubStr [i]; m_pOffsets [i] = (int) i; } Count = (int) cbSubStrLen; } if (0 < Count) { m_uTxtLen = Count; // process a buffer of UTF-32LE text ProcessTxt (); } else { return false; } } // of if (FromPos < cbUTF8Text) ... return true; } void FALexBreaker::ProcessTxt () { m_uTxtOffset = 0; m_iTokensSize = m_wbd.Process (m_pTxt, m_uTxtLen, m_pTokens, m_uMaxTokensSize); while (0 < m_iTokensSize) { // return words to the client ProcessTokens (); // see if we have more text to process if ((unsigned) m_iTokensSize < m_uMaxTokensSize) { break; } // see if we have more text to process const int LastTo = m_pTokens [m_iTokensSize - 1]; // [Tag, From, To] if ((unsigned) LastTo + 1 >= m_uTxtLen) { break; } // adjust Offset2 m_uTxtOffset += (LastTo + 1); // tokenize the rest m_iTokensSize = m_wbd.Process (m_pTxt + m_uTxtOffset, m_uTxtLen - m_uTxtOffset, m_pTokens, m_uMaxTokensSize); } } void FALexBreaker::ProcessTokens () const { DebugLogAssert (0 == m_iTokensSize % 3); int NormWord [MaxWordLen]; int NormWordLen = 0; int CxTokenFrom = -1; int CxTokenTo = -1; for (int i = 0; i < m_iTokensSize; i += 3) { const int Tag = m_pTokens [i]; if (Tag == m_iIgnoreTag) { continue; } const int From = m_pTokens [i + 1]; const int To = m_pTokens [i + 2]; const int TxtFrom = From + m_uTxtOffset; const int TxtTo = To + m_uTxtOffset; DebugLogAssert (TxtTo >= TxtFrom && (unsigned) TxtTo < m_uMaxCharsAtOnce); // fitst see if there was a complex token but current token // is already outside its boundaries if (-1 != CxTokenFrom && TxtFrom > CxTokenTo) { // put assembled complex token PutToken (m_iXWordTag, CxTokenFrom, CxTokenTo, NormWord, NormWordLen); // forget the complex token ever existed NormWordLen = 0; CxTokenFrom = -1; CxTokenTo = -1; } // see if there were no complex token read (the most common) if (-1 == CxTokenFrom) { // check if this one is not a complex token if (Tag != m_iXWordTag) { PutToken (Tag, TxtFrom, TxtTo); } else { CxTokenFrom = TxtFrom; CxTokenTo = TxtTo; } } // see if the current token is within the complex token else if (TxtTo <= CxTokenTo) { // check if that's a segment token, which we need to add if (Tag == m_iSegTag) { const int SegLen = TxtTo - TxtFrom + 1; const int NewNormWordLen = NormWordLen + SegLen; if (NewNormWordLen <= MaxWordLen) { memcpy (NormWord + NormWordLen, m_pTxt + TxtFrom, SegLen * sizeof (int)); NormWordLen = NewNormWordLen; } } else { PutToken (Tag, TxtFrom, TxtTo); } } } // see if complex token was the last one if (-1 != CxTokenFrom) { // put assembled complex token PutToken (m_iXWordTag, CxTokenFrom, CxTokenTo, 
NormWord, NormWordLen); } } inline void FALexBreaker:: PutToken(const int Tag, const int TxtFrom, int TxtTo, const int * pNormTxt, int NormTxtLen) const { // check the normalized word length LogAssert (MaxWordLen >= NormTxtLen); // UTF-32LE normalized word text buffer int NormTxt [MaxWordLen]; // normalized word in UTF-8 UInt8 NormUtf8 [FAUtf8Const::MAX_CHAR_SIZE * MaxWordLen]; // final normalized UTF-8 pointer and length const UInt8 * pbNormUtf8; size_t cbNormUtf8Len; // no normalization done or it did not change the surface word bool fUseRaw = false; // readjust TxtTo in such way that at most MaxWordLen characters are taken const int MaxTxtTo = TxtFrom + MaxWordLen - 1; if (TxtTo > MaxTxtTo) { TxtTo = MaxTxtTo; } // normalize Txt[TxtFrom..TxtTo], if pNormTxt/NormTxtLen is not provided if (0 >= NormTxtLen) { if (m_pNorm) { const int TxtLen = TxtTo - TxtFrom + 1; NormTxtLen = FANormalize (m_pTxt + TxtFrom, TxtLen, NormTxt, MaxWordLen, m_pNorm); // see if the result of the word normalization is longer than the MaxWordLen if (MaxWordLen < NormTxtLen) { NormTxtLen = MaxWordLen; } pNormTxt = NormTxt; // see if normalization actually changed anything if (0 == memcmp (m_pTxt + TxtFrom, NormTxt, sizeof(int)*TxtLen)) { fUseRaw = true; } } else { fUseRaw = true; } } // get surface string offsets const int Utf8From = m_pOffsets [TxtFrom]; const int Utf8To = m_pOffsets [TxtTo]; // surface string offset const size_t Offset = m_uUtf8Offset + Utf8From; // surface string length, the last character length can be greater than 1 const size_t Length = Utf8To - Utf8From + FAUtf8Size (m_pTxt [TxtTo]); // see if we can just get a surface string if (fUseRaw) { // get a surface string pointer and length pbNormUtf8 = m_pbUTF8Text + Offset; cbNormUtf8Len = Length; } else { // convert UTF-32LE to UTF-8 const int Utf8Len = FAArrayToStrUtf8 (pNormTxt, NormTxtLen, (char*) NormUtf8, FAUtf8Const::MAX_CHAR_SIZE * MaxWordLen); // we don't expect errors here because pTxt is a valid UTF-32LE LogAssert (0 <= Utf8Len); cbNormUtf8Len = Utf8Len; pbNormUtf8 = NormUtf8; } // the last check LogAssert (0 < Length && m_cbUTF8Text >= Length + Offset); // return an original word if (m_pOutText) { AddWord (m_pOutText, m_pbUTF8Text + Offset, Length, Tag); } // return normalized word if (m_pOutNormText) { AddWord (m_pOutNormText, pbNormUtf8, cbNormUtf8Len, Tag); } } inline void FALexBreaker::AddWord ( FAArray_cont_t < char > * pOutText, const UInt8 * pWord, const size_t Length, const int Tag ) const { LogAssert (pOutText); const int OldSize = pOutText->size (); const char * pTagStr = NULL; int TagStrLen = 0; int DelimLen = 0; if (m_pTagSet) { TagStrLen = m_pTagSet->Tag2Str (Tag, &pTagStr); LogAssert (0 < TagStrLen && pTagStr); DelimLen = 1; } // calc new space int NewSize = OldSize + int (Length) + TagStrLen + DelimLen; // add space for the delimiter if (0 != OldSize) { NewSize++; } pOutText->resize (NewSize, NewSize); char * pOut = pOutText->begin () + OldSize; // copy the delimiter if (0 != OldSize) { *pOut++ = char (FAFsmConst::CHAR_SPACE); } for (size_t i = 0; i < Length; ++i) { char Symbol = pWord [i]; if (FAFsmConst::CHAR_SPACE == Symbol) { Symbol = FAFsmConst::CHAR_MWE_DELIM; } *pOut++ = Symbol; } /// copy the tag string if (m_pTagSet) { *pOut++ = FAFsmConst::CHAR_TAG_DELIM; for (int i = 0; i < TagStrLen; ++i) { char Symbol = pTagStr [i]; *pOut++ = Symbol; } } } }
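BreakText_int above feeds the tokenizer fixed-size windows, preferring to end each window just after a byte <= 0x20 (space or control) so that words are not split mid-token, and falling back to a hard cut when no stop byte exists. A condensed Go sketch of that chunking policy; as in the C++ above, a production version must additionally avoid cutting inside a multi-byte UTF-8 sequence.

package main

import "fmt"

// chunkAt splits text into pieces of at most max bytes, cutting just after
// the last byte <= 0x20 inside the window when one exists.
func chunkAt(text []byte, max int) [][]byte {
	var chunks [][]byte
	for len(text) > max {
		cut := max // hard cut if no stop byte is found
		for i := max - 1; i >= 0; i-- {
			if text[i] <= 0x20 {
				cut = i + 1
				break
			}
		}
		chunks = append(chunks, text[:cut])
		text = text[cut:]
	}
	if len(text) > 0 {
		chunks = append(chunks, text)
	}
	return chunks
}

func main() {
	for _, c := range chunkAt([]byte("hello wide world"), 7) {
		fmt.Printf("%q\n", c) // "hello ", "wide ", "world"
	}
}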
// Copyright (c) 2011 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef GPU_COMMAND_BUFFER_SERVICE_PROGRAM_MANAGER_H_ #define GPU_COMMAND_BUFFER_SERVICE_PROGRAM_MANAGER_H_ #include <map> #include <string> #include <vector> #include "base/basictypes.h" #include "base/logging.h" #include "base/memory/ref_counted.h" #include "base/memory/scoped_ptr.h" #include "gpu/command_buffer/service/gl_utils.h" #include "gpu/command_buffer/service/shader_manager.h" namespace gpu { namespace gles2 { // Tracks the Programs. // // NOTE: To support shared resources an instance of this class will // need to be shared by multiple GLES2Decoders. class ProgramManager { public: // This is used to track which attributes a particular program needs // so we can verify at glDrawXXX time that every attribute is either disabled // or if enabled that it points to a valid source. class ProgramInfo : public base::RefCounted<ProgramInfo> { public: typedef scoped_refptr<ProgramInfo> Ref; static const int kMaxAttachedShaders = 2; struct UniformInfo { UniformInfo(GLsizei _size, GLenum _type, const std::string& _name); ~UniformInfo(); bool IsSampler() const { return type == GL_SAMPLER_2D || type == GL_SAMPLER_CUBE; } GLsizei size; GLenum type; bool is_array; std::string name; std::vector<GLint> element_locations; std::vector<GLuint> texture_units; }; struct VertexAttribInfo { VertexAttribInfo(GLsizei _size, GLenum _type, const std::string& _name, GLint _location) : size(_size), type(_type), location(_location), name(_name) { } GLsizei size; GLenum type; GLint location; std::string name; }; typedef std::vector<UniformInfo> UniformInfoVector; typedef std::vector<VertexAttribInfo> AttribInfoVector; typedef std::vector<int> SamplerIndices; explicit ProgramInfo(GLuint service_id); GLuint service_id() const { return service_id_; } const SamplerIndices& sampler_indices() { return sampler_indices_; } // Updates the program info after a successful link. void Update(); // Updates the program log info. void UpdateLogInfo(); const AttribInfoVector& GetAttribInfos() const { return attrib_infos_; } const VertexAttribInfo* GetAttribInfo(GLint index) const { return (static_cast<size_t>(index) < attrib_infos_.size()) ? &attrib_infos_[index] : NULL; } GLint GetAttribLocation(const std::string& name) const; const VertexAttribInfo* GetAttribInfoByLocation(GLuint location) const { if (location < attrib_location_to_index_map_.size()) { GLint index = attrib_location_to_index_map_[location]; if (index >= 0) { return &attrib_infos_[index]; } } return NULL; } const UniformInfo* GetUniformInfo(GLint index) const { return (static_cast<size_t>(index) < uniform_infos_.size()) ? &uniform_infos_[index] : NULL; } // Gets the location of a uniform by name. GLint GetUniformLocation(const std::string& name) const; // Gets the UniformInfo of a uniform by location. const UniformInfo* GetUniformInfoByLocation( GLint location, GLint* array_index) const; // Sets the sampler values for a uniform. // This is safe to call for any location. If the location is not // a sampler uniform nothing will happen. 
bool SetSamplers(GLint location, GLsizei count, const GLint* value); bool IsDeleted() const { return service_id_ == 0; } void GetProgramiv(GLenum pname, GLint* params); bool IsValid() const { return valid_; } void ClearLinkStatus() { link_status_ = false; } bool AttachShader(ShaderManager* manager, ShaderManager::ShaderInfo* info); bool DetachShader(ShaderManager* manager, ShaderManager::ShaderInfo* info); bool CanLink() const; const std::string* log_info() const { return log_info_.get(); } void set_log_info(const char* str) { log_info_.reset(str ? new std::string(str) : NULL); } bool InUse() const { DCHECK_GE(use_count_, 0); return use_count_ != 0; } private: friend class base::RefCounted<ProgramInfo>; friend class ProgramManager; // Info for each location struct LocationInfo { LocationInfo() : uniform_index(-1), array_index(-1) { } LocationInfo(GLint _uniform_index, GLint _array_index) : uniform_index(_uniform_index), array_index(_array_index) { } GLint uniform_index; // index of UniformInfo in uniform_infos_. GLint array_index; // index of location when used in array. }; ~ProgramInfo(); void IncUseCount() { ++use_count_; } void DecUseCount() { --use_count_; DCHECK_GE(use_count_, 0); } void MarkAsDeleted() { DCHECK_NE(service_id_, 0u); service_id_ = 0; } // Resets the program. void Reset(); const UniformInfo* AddUniformInfo( GLsizei size, GLenum type, GLint location, const std::string& name); void GetCorrectedVariableInfo( bool use_uniforms, const std::string& name, std::string* corrected_name, GLsizei* size, GLenum* type) const; void DetachShaders(ShaderManager* manager); int use_count_; GLsizei max_attrib_name_length_; // Attrib by index. AttribInfoVector attrib_infos_; // Attrib by location to index. std::vector<GLint> attrib_location_to_index_map_; GLsizei max_uniform_name_length_; // Uniform info by index. UniformInfoVector uniform_infos_; // Info for each location. std::vector<LocationInfo> location_infos_; // The indices of the uniforms that are samplers. SamplerIndices sampler_indices_; // The program this ProgramInfo is tracking. GLuint service_id_; // Shaders by type of shader. ShaderManager::ShaderInfo::Ref attached_shaders_[kMaxAttachedShaders]; // This is true if glLinkProgram was successful at least once. bool valid_; // This is true if glLinkProgram was successful last time it was called. bool link_status_; // Log info scoped_ptr<std::string> log_info_; }; ProgramManager(); ~ProgramManager(); // Must call before destruction. void Destroy(bool have_context); // Creates a new program info. void CreateProgramInfo(GLuint client_id, GLuint service_id); // Gets a program info ProgramInfo* GetProgramInfo(GLuint client_id); // Gets a client id for a given service id. bool GetClientId(GLuint service_id, GLuint* client_id) const; // Marks a program as deleted. If it is not used the info will be deleted. void MarkAsDeleted(ShaderManager* shader_manager, ProgramInfo* info); // Marks a program as used. void UseProgram(ProgramInfo* info); // Makes a program as unused. If deleted the program info will be removed. void UnuseProgram(ShaderManager* shader_manager, ProgramInfo* info); // Returns true if prefix is invalid for gl. static bool IsInvalidPrefix(const char* name, size_t length); // Check if a ProgramInfo is owned by this ProgramManager. bool IsOwned(ProgramInfo* info); private: // Info for each "successfully linked" program by service side program Id. // TODO(gman): Choose a faster container. 
typedef std::map<GLuint, ProgramInfo::Ref> ProgramInfoMap; ProgramInfoMap program_infos_; void RemoveProgramInfoIfUnused( ShaderManager* shader_manager, ProgramInfo* info); DISALLOW_COPY_AND_ASSIGN(ProgramManager); }; } // namespace gles2 } // namespace gpu #endif // GPU_COMMAND_BUFFER_SERVICE_PROGRAM_MANAGER_H_
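GetAttribInfoByLocation above resolves a GL location through a dense location-to-index table with -1 as the "nothing bound here" sentinel, trading a little memory for O(1) lookups without a map. A self-contained Go sketch of the same indirection; the names are illustrative stand-ins, not Chromium's.

package main

import "fmt"

type AttribInfo struct{ Name string }

type Program struct {
	attribs         []AttribInfo
	locationToIndex []int // -1 where no attribute is bound
}

// AttribByLocation returns nil for out-of-range or unbound locations,
// mirroring the bounds check plus sentinel test in the header above.
func (p *Program) AttribByLocation(loc uint) *AttribInfo {
	if loc < uint(len(p.locationToIndex)) {
		if idx := p.locationToIndex[loc]; idx >= 0 {
			return &p.attribs[idx]
		}
	}
	return nil
}

func main() {
	p := Program{
		attribs:         []AttribInfo{{"position"}, {"normal"}},
		locationToIndex: []int{0, -1, 1},
	}
	fmt.Println(p.AttribByLocation(2).Name) // normal
	fmt.Println(p.AttribByLocation(1))      // <nil>
}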
export { useMachine } from '@xstate/react/lib/fsm'; export * from './photoModal'; export * from './sportsSection'; export * from './articleMachine'; export * from './authorMachine';
// Evaluate returns the evaluated value of this ifBlock's then-branch if the
// predicate evaluates to true, the else-branch if one is present, and an
// empty string otherwise (or when the receiver is nil)
func (ib *ifBlock) Evaluate() string {
	if ib == nil {
		return ""
	}
	if strings.ToLower(ib.predicateExpr[0]) == "true" {
		return ib.then.Evaluate()
	} else if ib.otherwise != nil {
		return ib.otherwise.Evaluate()
	}
	return ""
}
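The `if ib == nil` guard is what lets callers invoke Evaluate on optional branches without checking for nil first — a common Go idiom for tree-shaped AST types. A self-contained illustration with a stand-in type (not the real parser types from this package):

package main

import "fmt"

type node struct{ val string }

// Evaluate is safe to call on a nil *node, the same guard ifBlock.Evaluate
// uses above, so optional children never need caller-side nil checks.
func (n *node) Evaluate() string {
	if n == nil {
		return ""
	}
	return n.val
}

func main() {
	var missing *node
	fmt.Printf("%q\n", missing.Evaluate()) // "" — no panic
	fmt.Printf("%q\n", (&node{"yes"}).Evaluate())
}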
package se.liu.ida.rspqlstar.store.dictionary;

import java.util.concurrent.atomic.AtomicInteger;

public class IdFactory {
    static final public long REFERENCE_BIT = IdFactory.makeLong("1000000000000000000000000000000000000000000000000000000000000000");
    // Note: Although we don't use embedded nodes here, we could embed datatypes and values as in CQELS
    static private AtomicInteger nodeIdCounter = new AtomicInteger();
    static private AtomicInteger referenceIdCounter = new AtomicInteger();

    /**
     * Create the next node ID for the node dictionary.
     *
     * @return
     */
    public static long nextNodeId() {
        return nodeIdCounter.incrementAndGet();
    }

    /**
     * Get the current node ID.
     *
     * @return
     */
    public static long getNodeId() {
        return nodeIdCounter.get();
    }

    /**
     * Create the next reference ID for the reference dictionary.
     *
     * @return
     */
    public static long nextReferenceKeyId() {
        return REFERENCE_BIT + referenceIdCounter.incrementAndGet();
    }

    /**
     * Returns true if the provided id is a reference triple id.
     *
     * TODO: Null value from node dictionary is -1. Will this work if a dummy triple node is bound to a var?
     * @param id
     * @return
     */
    public static boolean isReferenceId(long id) {
        return (id >>> 63) == 1;
    }

    /**
     * Produces a long for a 64 bit string.
     * Note: Java encodes strings starting with 1 as negative values. This means that all 64 bit strings need to be
     * parsed as negative 63 bit values.
     *
     * @param input
     * @return
     */
    public static long makeLong(String input) {
        if (input.substring(0, 1).equals("1")) {
            return -1 * (Long.MAX_VALUE - Long.parseLong(input.substring(1), 2) + 1);
        } else {
            return Long.parseLong(input, 2);
        }
    }

    public static long getReferenceIdBody(long id) {
        return id & ~REFERENCE_BIT;
    }

    public static void reset() {
        nodeIdCounter = new AtomicInteger();
        referenceIdCounter = new AtomicInteger();
    }
}
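The makeLong helper above exists only because Java longs are signed, so a bit pattern with the top bit set must be parsed as a negative value. In a language with unsigned 64-bit integers, the three id operations — tag, test, strip — reduce to plain bit arithmetic, as this Go sketch shows:

package main

import "fmt"

// referenceBit is the top bit of a 64-bit id, the same REFERENCE_BIT role
// as above: setting it tags an id as a reference, shifting by 63 tests it,
// and masking it off recovers the body.
const referenceBit uint64 = 1 << 63

func makeReferenceID(counter uint64) uint64 { return referenceBit | counter }
func isReferenceID(id uint64) bool          { return id>>63 == 1 }
func referenceBody(id uint64) uint64        { return id &^ referenceBit }

func main() {
	id := makeReferenceID(42)
	fmt.Println(isReferenceID(id), referenceBody(id)) // true 42
	fmt.Println(isReferenceID(42))                    // false
}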
import React, { createContext, useCallback, useState, useContext } from 'react';

import UsersRepository from '../students/users/fakes/FakeUsersRepository';

interface LoginCredentials {
  email: string;
  password: string;
}

interface AuthState {
  name: string | undefined;
}

interface AuthContextData {
  email: string | undefined;
  login(credentials: LoginCredentials): Promise<void>;
  logout(): void;
}

const AuthContext = createContext<AuthContextData>({} as AuthContextData);

const AuthProvider: React.FC = ({ children }) => {
  const usersRepository = new UsersRepository();

  const [data, setData] = useState<AuthState>(() => {
    const name = localStorage.getItem('@iCollege:user');

    if (name) {
      // The name is stored JSON-encoded in login below, so decode it here.
      return { name: JSON.parse(name) };
    }

    return {} as AuthState;
  });

  const login = useCallback(
    async ({ email, password }) => {
      const response = await usersRepository.login(email, password);

      const name = response?.name;

      localStorage.setItem('@iCollege:user', JSON.stringify(name));

      setData({ name });
    },
    [usersRepository],
  );

  const logout = useCallback(() => {
    localStorage.removeItem('@iCollege:user');

    setData({} as AuthState);
  }, []);

  return (
    <AuthContext.Provider value={{ email: data.name, login, logout }}>
      {children}
    </AuthContext.Provider>
  );
};

function useAuth(): AuthContextData {
  const context = useContext(AuthContext);

  if (!context) {
    throw new Error('useAuth must be used within an AuthProvider');
  }

  return context;
}

export { AuthProvider, useAuth };
/**************************************************************
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 *
 *************************************************************/

// MARKER(update_precomp.py): autogen include statement, do not remove
#include "precompiled_sal.hxx"

#include "gtest/gtest.h"
#include <osl/file.hxx>
#include <osl/thread.h>
#include <rtl/ustring.hxx>

using namespace osl;
using namespace rtl;

//########################################

#ifdef UNX
#   define COPY_SOURCE_PATH "/home/tr109510/ucbhelper.cxx"
#   define COPY_DEST_PATH "/mnt/mercury08/ucbhelper.cxx"
#else /* if WNT */
#   define COPY_SOURCE_PATH "d:\\msvcr70.dll"
#   define COPY_DEST_PATH "x:\\tra\\msvcr70.dll"
#endif

class test_osl_copyFile : public ::testing::Test
{
public:
};

TEST_F(test_osl_copyFile, cp_file)
{
    rtl::OUString src_url;
    FileBase::getFileURLFromSystemPath(rtl::OUString::createFromAscii(COPY_SOURCE_PATH), src_url);

    rtl::OUString dest_url;
    FileBase::getFileURLFromSystemPath(rtl::OUString::createFromAscii(COPY_DEST_PATH), dest_url);

    FileBase::RC err = File::copy(src_url, dest_url);
    ASSERT_TRUE(err != FileBase::E_None) << "Copy didn't recognize disk full";
}

//########################################

#ifdef UNX
#   define WRITE_DEST_PATH "/mnt/mercury08/muell.tmp"
#else /* if WNT */
#   define WRITE_DEST_PATH "d:\\tmp_data.tmp"
#endif

class test_osl_writeFile : public ::testing::Test
{
public:
};

TEST_F(test_osl_writeFile, wrt_file)
{
    rtl::OUString dest_url;
    FileBase::getFileURLFromSystemPath(rtl::OUString::createFromAscii(WRITE_DEST_PATH), dest_url);

    File tmp_file(dest_url);
    rtl::OUString suErrorMsg = rtl::OUString::createFromAscii("File creation failed: ") + dest_url;
    FileBase::RC err = tmp_file.open(osl_File_OpenFlag_Write | osl_File_OpenFlag_Create);

    ASSERT_TRUE(err == FileBase::E_None || err == FileBase::E_EXIST) << suErrorMsg.pData;

    char buffer[50000];
    sal_uInt64 written = 0;
    err = tmp_file.write((void*)buffer, sizeof(buffer), written);
    err = tmp_file.sync();

    ASSERT_TRUE(err != FileBase::E_None) << "Write didn't recognize disk full";

    tmp_file.close();
}

int main(int argc, char **argv)
{
    ::testing::InitGoogleTest(&argc, argv);
    return RUN_ALL_TESTS();
}
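Both tests above assert that writing to a full volume surfaces as an error rather than a silent success. A rough Go analogue of wrt_file — the mount point is a placeholder, just like the hard-coded paths in the C++ test:

package main

import (
	"errors"
	"fmt"
	"os"
	"syscall"
)

func main() {
	f, err := os.Create("/mnt/full-volume/muell.tmp")
	if err != nil {
		fmt.Println("create failed:", err)
		return
	}
	defer f.Close()

	_, err = f.Write(make([]byte, 50000))
	if err == nil {
		err = f.Sync() // like tmp_file.sync(): flush so the failure is reported
	}
	if errors.Is(err, syscall.ENOSPC) {
		fmt.Println("disk full correctly detected")
	} else {
		fmt.Println("unexpected result:", err)
	}
}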
x = 'OK',
print x[0]
def _var_names(self, intent='input'): assert(intent in ['input', 'output']) names = set() for (name, component) in self.items(): try: for var_name in get_var_names(component, intent=intent): names.add(var_name) except AttributeError: pass return list(names)
use std::collections::HashSet;
use std::str::FromStr;

use itertools::Itertools;

use crate::days::day_19::vector::Vector;
use crate::parse_input;

#[derive(Debug)]
pub struct Report {
    beacons: HashSet<Vector>,
}

impl Report {
    pub(crate) fn nb_beacons(&self) -> usize {
        self.beacons.len()
    }
}

impl Report {
    pub fn _beacons(&self) -> &HashSet<Vector> {
        &self.beacons
    }

    pub fn merge(&mut self, other: Report) {
        self.beacons.extend(other.beacons);
    }
}

#[derive(Debug)]
pub struct Match {
    report: Report,
    offset: Vector,
    nb_common: usize,
}

impl Match {
    pub fn offset(self) -> Vector {
        self.offset
    }

    pub fn report(self) -> Report {
        self.report
    }
}

impl FromStr for Report {
    type Err = String;

    fn from_str(report: &str) -> Result<Self, Self::Err> {
        let lines: Vec<&str> = report.split('\n').collect();
        let beacons = lines.iter().skip(1).map(|l| parse_input!(l, Vector)).collect();
        Ok(Report { beacons })
    }
}

impl Report {
    pub fn translate(&self, offset: &Vector) -> Report {
        let beacons = self.beacons.iter().map(|p| p.translate(offset)).collect();
        Report { beacons }
    }

    pub fn rotate(&self, rotation: u8) -> Report {
        let beacons = self.beacons.iter().map(|p| p.rotate(rotation)).collect();
        Report { beacons }
    }

    pub fn find_match(&self, report: &Report) -> Option<Match> {
        (0..24)
            .map(|rotation_idx| self.find_offset(rotation_idx, report))
            .flatten()
            .max_by(|t1, t2| t1.nb_common.cmp(&t2.nb_common))
    }

    pub fn find_offset(&self, rotation_idx: u8, report: &Report) -> Option<Match> {
        let transformed_report = report.rotate(rotation_idx);
        let offsets = self.beacons.iter()
            .flat_map(|beacon1| transformed_report.beacons.iter().map(|beacon2| beacon1.subtract(beacon2)))
            .counts_by(|o| o);
        offsets
            .iter()
            .max_by(|o1, o2| o1.1.cmp(o2.1))
            .map(|(p, n)| Match { report: transformed_report.translate(p), nb_common: *n, offset: p.clone() })
            .filter(|m| m.nb_common >= 12)
    }
}
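find_offset above works by counting, for a given rotation, how often each pairwise difference between the two beacon sets occurs; a difference shared by at least 12 pairs means the scanners overlap at that translation. A self-contained Go sketch of that counting step, using the 2-D example from the Advent of Code 2021 day 19 puzzle statement:

package main

import "fmt"

type vec struct{ x, y, z int }

// findOffset counts candidate translations across all beacon pairs and
// returns the first one that reaches minOverlap occurrences — the same
// frequency test Report::find_offset performs per rotation.
func findOffset(a, b []vec, minOverlap int) (vec, bool) {
	counts := map[vec]int{}
	for _, p := range a {
		for _, q := range b {
			d := vec{p.x - q.x, p.y - q.y, p.z - q.z}
			counts[d]++
			if counts[d] >= minOverlap {
				return d, true
			}
		}
	}
	return vec{}, false
}

func main() {
	a := []vec{{0, 2, 0}, {4, 1, 0}, {3, 3, 0}}
	b := []vec{{-1, -1, 0}, {-5, 0, 0}, {-2, 1, 0}}
	fmt.Println(findOffset(a, b, 3)) // {5 2 0} true
}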
package com.icehofman.currency.quotation.api.models;

import org.junit.Assert;
import org.junit.Test;

public class CurrencyModelTest {

    @Test
    public void currencyConstructor() {
        String line = "20/11/2014;009;A;ETB;0,12590000;0,12720000;20,00200000;20,20200000";
        CurrencyModel currencyModel = new CurrencyModel(line);
        Assert.assertNotNull(currencyModel);
    }

    @Test(expected = IllegalArgumentException.class)
    public void currencyConstructorWithBrokenLine() {
        String line = "123;23;x;u;j";
        CurrencyModel currencyModel = new CurrencyModel(line);
        Assert.assertNull(currencyModel);
    }
}
/** * Generate indirect jump to unknown destination * * @returns VBox status code. * @param pVM The cross context VM structure. * @param pPatch Patch record * @param pCpu Disassembly state * @param pCurInstrGC Current instruction address */ int patmPatchGenJump(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RTRCPTR pCurInstrGC) { PATMCALLINFO callInfo; uint32_t offset; uint32_t i, size; int rc; rc = patmPatchGenClearPIF(pVM, pPatch, pCurInstrGC); if (rc == VERR_NO_MEMORY) return rc; AssertRCReturn(rc, rc); PATCHGEN_PROLOG(pVM, pPatch, PATCHGEN_DEF_SIZE); Log(("patmPatchGenIndirectJump\n")); Assert(pCpu->Param1.cb == 4); Assert(OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) != OP_PARM_J); offset = 0; if (pCpu->fPrefix & DISPREFIX_SEG) pPB[offset++] = DISQuerySegPrefixByte(pCpu); pPB[offset++] = 0xFF; pPB[offset++] = MAKE_MODRM(pCpu->ModRM.Bits.Mod, 6 , pCpu->ModRM.Bits.Rm); i = 2; if (pCpu->fPrefix & DISPREFIX_OPSIZE) i++; if (pCpu->fPrefix & DISPREFIX_SEG) i++; rc = patmPatchReadBytes(pVM, &pPB[offset], (RTRCPTR)((RTGCUINTPTR32)pCurInstrGC + i), pCpu->cbInstr - i); AssertRCReturn(rc, rc); offset += (pCpu->cbInstr - i); size = (RTHCUINTPTR)&pPB[offset] & 3; if (size) size = 4 - size; for (i=0;i<size;i++) { pPB[offset++] = 0x90; } PATCHGEN_EPILOG(pPatch, offset); PATCHGEN_PROLOG_NODEF(pVM, pPatch, g_patmJumpIndirectRecord.cbFunction); callInfo.pReturnGC = pCurInstrGC + pCpu->cbInstr; callInfo.pTargetGC = 0xDEADBEEF; size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmJumpIndirectRecord, 0, false, &callInfo); PATCHGEN_EPILOG(pPatch, size); STAM_COUNTER_INC(&pVM->patm.s.StatGenJump); return VINF_SUCCESS; }
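The filler loop above pads the generated patch to a 4-byte boundary with single-byte NOPs (0x90), computing the pad length as 4 minus the low two bits of the write pointer. The same arithmetic as a self-contained Go sketch:

package main

import "fmt"

// padToAlign returns how many single-byte NOPs are needed to bring addr up
// to the next 4-byte boundary — the (addr & 3) computation the patch
// generator performs before emitting its 0x90 filler bytes.
func padToAlign(addr uintptr) int {
	if rem := int(addr & 3); rem != 0 {
		return 4 - rem
	}
	return 0
}

func main() {
	for _, a := range []uintptr{0x1000, 0x1001, 0x1002, 0x1003} {
		fmt.Printf("addr %#x needs %d NOP(s)\n", a, padToAlign(a))
	}
}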
use super::circuits::*; use super::gadgets::*; use super::proofs::*; use super::synthesis::Basic; use super::{Curve, Field}; use std::marker::PhantomData; #[derive(Clone)] pub struct RecursiveProof<E1: Curve, E2: Curve> { proof: Proof<E1>, oldproof1: Leftovers<E1>, oldproof2: Leftovers<E2>, deferred: Deferred<E2::Scalar>, payload: Vec<u8>, } impl<E1, E2> RecursiveProof<E1, E2> where E1: Curve<Base = <E2 as Curve>::Scalar>, E2: Curve<Base = <E1 as Curve>::Scalar>, { pub fn create_proof<CS: RecursiveCircuit<E1::Scalar> + RecursiveCircuit<E2::Scalar>>( e1params: &Params<E1>, e2params: &Params<E2>, old_proof: Option<&RecursiveProof<E2, E1>>, circuit: &CS, new_payload: &[u8], ) -> Result<Self, SynthesisError> { let (newdeferred, new_leftovers, old_leftovers, forkvalues) = match old_proof { Some(old_proof) => { let (_, newdeferred, l1, l2, forkvalues) = old_proof.verify_inner(e2params, e1params, circuit)?; (newdeferred, l1, l2, forkvalues) } None => ( Deferred::dummy(e2params.k), Leftovers::dummy(e2params), Leftovers::dummy(e1params), vec![0; e2params.k], ), }; let mut circuit = VerificationCircuit::<E1, E2, _> { _marker: PhantomData, params: e2params, base_case: None, proof: None, inner_circuit: circuit, new_payload, forkvalues: Some(&forkvalues[..]), old_leftovers: Some(old_leftovers.clone()), new_leftovers: Some(new_leftovers.clone()), deferred: Some(newdeferred.clone()), }; if old_proof.is_some() { circuit.base_case = Some(false); circuit.proof = old_proof; } else { circuit.base_case = Some(true); } // Now make the proof... let (proof, _) = Proof::new::<_, Basic>(e1params, &circuit, &old_leftovers)?; Ok(RecursiveProof { proof, oldproof1: old_leftovers, oldproof2: new_leftovers, deferred: newdeferred, payload: new_payload.to_vec(), }) } pub(crate) fn verify_inner<CS: RecursiveCircuit<E1::Scalar> + RecursiveCircuit<E2::Scalar>>( &self, e1params: &Params<E1>, e2params: &Params<E2>, circuit: &CS, ) -> Result< ( bool, Deferred<E1::Scalar>, Leftovers<E1>, Leftovers<E2>, Vec<u8>, ), SynthesisError, > { let circuit1 = VerificationCircuit::<E1, E2, _> { _marker: PhantomData, params: e2params, base_case: None, proof: None, inner_circuit: circuit, new_payload: &self.payload, forkvalues: None, old_leftovers: None, new_leftovers: None, deferred: None, }; let circuit2 = VerificationCircuit::<E2, E1, _> { _marker: PhantomData, params: e1params, base_case: None, proof: None, inner_circuit: circuit, new_payload: &self.payload, forkvalues: None, old_leftovers: None, new_leftovers: None, deferred: None, }; // The public inputs for the proof consists of // 1. The (new) payload. // 2. The leftovers that should be used to verify this proof. // 3. The leftovers that should be used to construct a proof // for the next proof. // 4. The deferred information that has to be manually checked // by the verifier. 
let mut inputs = vec![]; inputs.extend(self.payload.iter().cloned()); inputs.extend(self.oldproof1.to_bytes()); inputs.extend(self.oldproof2.to_bytes()); inputs.extend(self.deferred.to_bytes()); let mut k_commitment = e1params.generators[1]; let mut iter_gens = e1params.generators[2..].iter(); let mut bitinputs = vec![]; for byte in inputs { for i in 0..8 { let b = ((byte >> i) & 1) == 1; if b { bitinputs.push(E1::Scalar::one()); k_commitment = k_commitment + iter_gens.next().unwrap(); } else { iter_gens.next(); bitinputs.push(E1::Scalar::zero()); } } } let (worked, leftovers, deferred, forkvalues) = self.proof.verify::<_, Basic>( &self.oldproof1, e1params, &circuit1, &bitinputs, Some(k_commitment), )?; let worked = worked & self.oldproof2.verify::<_, Basic>(e2params, &circuit2)?; Ok(( worked, deferred, leftovers, self.oldproof2.clone(), forkvalues, )) } pub fn verify<CS: RecursiveCircuit<E1::Scalar> + RecursiveCircuit<E2::Scalar>>( &self, e1params: &Params<E1>, e2params: &Params<E2>, circuit: &CS, ) -> Result<bool, SynthesisError> { let circuit1 = VerificationCircuit::<E1, E2, _> { _marker: PhantomData, params: e2params, base_case: None, proof: None, inner_circuit: circuit, new_payload: &self.payload, forkvalues: None, old_leftovers: None, new_leftovers: None, deferred: None, }; let circuit2 = VerificationCircuit::<E2, E1, _> { _marker: PhantomData, params: e1params, base_case: None, proof: None, inner_circuit: circuit, new_payload: &self.payload, forkvalues: None, old_leftovers: None, new_leftovers: None, deferred: None, }; let (worked, deferred, a, b, _) = self.verify_inner(e1params, e2params, circuit)?; Ok(worked & self.deferred.verify(e2params.k) & deferred.verify(e1params.k) & a.verify::<_, Basic>(e1params, &circuit1)? & b.verify::<_, Basic>(e2params, &circuit2)?) 
} } pub(crate) struct VerificationCircuit<'a, C1: Curve, C2: Curve, CS: RecursiveCircuit<C1::Scalar>> { pub(crate) _marker: PhantomData<(C1, C2)>, pub(crate) params: &'a Params<C2>, pub(crate) base_case: Option<bool>, pub(crate) inner_circuit: &'a CS, pub(crate) proof: Option<&'a RecursiveProof<C2, C1>>, pub(crate) new_payload: &'a [u8], pub(crate) forkvalues: Option<&'a [u8]>, pub(crate) old_leftovers: Option<Leftovers<C1>>, pub(crate) new_leftovers: Option<Leftovers<C2>>, pub(crate) deferred: Option<Deferred<C2::Scalar>>, } impl<'a, E1: Curve, E2: Curve<Base = E1::Scalar>, Inner: RecursiveCircuit<E1::Scalar>> VerificationCircuit<'a, E1, E2, Inner> { fn verify_deferred<CS: ConstraintSystem<E1::Scalar>>( &self, mut cs: CS, mut deferred: &[AllocatedBit], ) -> Result<(), SynthesisError> { // Unpack all of the deferred data let x = self.obtain_scalar_from_bits(cs.namespace(|| "pack x"), &deferred[0..128])?; deferred = &deferred[128..]; let y_old = self.obtain_scalar_from_bits(cs.namespace(|| "pack y_old"), &deferred[0..128])?; deferred = &deferred[128..]; let y_cur = self.obtain_scalar_from_bits(cs.namespace(|| "pack y_cur"), &deferred[0..128])?; deferred = &deferred[128..]; let y_new = self.obtain_scalar_from_bits(cs.namespace(|| "pack y_new"), &deferred[0..128])?; deferred = &deferred[128..]; let ky_opening = self.obtain_scalar_from_bits(cs.namespace(|| "pack ky_opening"), &deferred[0..256])?; deferred = &deferred[256..]; let tx_positive_opening = self.obtain_scalar_from_bits( cs.namespace(|| "pack tx_positive_opening"), &deferred[0..256], )?; deferred = &deferred[256..]; let tx_negative_opening = self.obtain_scalar_from_bits( cs.namespace(|| "pack tx_negative_opening"), &deferred[0..256], )?; deferred = &deferred[256..]; let sx_cur_opening = self .obtain_scalar_from_bits(cs.namespace(|| "pack sx_cur_opening"), &deferred[0..256])?; deferred = &deferred[256..]; let rx_opening = self.obtain_scalar_from_bits(cs.namespace(|| "pack rx_opening"), &deferred[0..256])?; deferred = &deferred[256..]; let rxy_opening = self.obtain_scalar_from_bits(cs.namespace(|| "pack rxy_opening"), &deferred[0..256])?; deferred = &deferred[256..]; let mut challenges_sq_old = vec![]; for i in 0..self.params.k { challenges_sq_old.push(self.get_challenge_scalar( cs.namespace(|| format!("pack old challenge {}", i)), &deferred[0..128], )?); deferred = &deferred[128..]; } let gx_old_opening = self .obtain_scalar_from_bits(cs.namespace(|| "pack gx_old_opening"), &deferred[0..256])?; deferred = &deferred[256..]; let mut challenges_sq_new = vec![]; for i in 0..self.params.k { challenges_sq_new.push(self.get_challenge_scalar( cs.namespace(|| format!("pack new challenge {}", i)), &deferred[0..128], )?); deferred = &deferred[128..]; } let b_x = self.obtain_scalar_from_bits(cs.namespace(|| "pack b_x"), &deferred[0..256])?; deferred = &deferred[256..]; let b_xy = self.obtain_scalar_from_bits(cs.namespace(|| "pack b_xy"), &deferred[0..256])?; deferred = &deferred[256..]; let b_y_old = self.obtain_scalar_from_bits(cs.namespace(|| "pack b_y_old"), &deferred[0..256])?; deferred = &deferred[256..]; let b_y_cur = self.obtain_scalar_from_bits(cs.namespace(|| "pack b_y_cur"), &deferred[0..256])?; deferred = &deferred[256..]; let b_y_new = self.obtain_scalar_from_bits(cs.namespace(|| "pack b_y_new"), &deferred[0..256])?; deferred = &deferred[256..]; assert_eq!(deferred.len(), 0); // Check that the inner proof's circuit check was satisfied for it, since // we can do scalar arithmetic more efficiently in our base field! 
:) let xinv = x.invert(cs.namespace(|| "xinv"))?; let yinv = y_cur.invert(cs.namespace(|| "yinv"))?; let xyinv = xinv.mul(cs.namespace(|| "xyinv"), &yinv)?; let xy = x.mul(cs.namespace(|| "xy"), &y_cur)?; let x_invy = x.mul(cs.namespace(|| "x_invy"), &yinv)?; let nk = self.params.k - 2; // let xinvn = xinv.pow(&[n as u64, 0, 0, 0]); let mut xinvn = xinv.clone(); for i in 0..nk { xinvn = xinvn.mul( cs.namespace(|| format!("xinv^{}", 2u32.pow(i as u32 + 1))), &xinvn, )?; } // let xinvd = xinvn.square().square(); let mut xinvd = xinvn.clone(); for i in 0..2 { xinvd = xinvd.mul( cs.namespace(|| format!("(xinv^n)^{}", 2u32.pow(i as u32 + 1))), &xinvd, )?; } // let yn = self.y_cur.pow(&[n as u64, 0, 0, 0]); let mut yn = y_cur.clone(); for i in 0..nk { yn = yn.mul( cs.namespace(|| format!("y^{}", 2u32.pow(i as u32 + 1))), &yn, )?; } // let xn = self.x.pow(&[n as u64, 0, 0, 0]); let mut xn = x.clone(); for i in 0..nk { xn = xn.mul( cs.namespace(|| format!("x^{}", 2u32.pow(i as u32 + 1))), &xn, )?; } // let xyinvn31 = xyinv.pow(&[(3 * n - 1) as u64, 0, 0, 0]); let mut xyinvn31 = xyinv.clone(); for i in 0..nk { xyinvn31 = xyinvn31.mul( cs.namespace(|| format!("xyinv^{}", 2u32.pow(i as u32 + 1))), &xyinvn31, )?; } { let tmp = xyinvn31.mul(cs.namespace(|| "xyinv^2n"), &xyinvn31)?; xyinvn31 = xyinvn31.mul(cs.namespace(|| "xyinv^3n"), &tmp)?; } xyinvn31 = xyinvn31.mul(cs.namespace(|| "xyinv^(3n-1)"), &xy)?; // let xinvn31 = (xinvn.square() * &xinvn) * &self.x; let xinvn31 = xinvn.mul(cs.namespace(|| "xinv^2n"), &xinvn)?; let xinvn31 = xinvn31.mul(cs.namespace(|| "xinv^3n"), &xinvn)?; let xinvn31 = xinvn31.mul(cs.namespace(|| "xinv^(3n-1)"), &x)?; // println!("circuit xyinvn31: {:?}", xyinvn31); // println!("circuit xinvn31: {:?}", xinvn31); let rhs = tx_positive_opening.mul(cs.namespace(|| "tx+opening * x"), &x)?; let tmp = tx_negative_opening.mul(cs.namespace(|| "tx-opening * xinvd"), &xinvd)?; let rhs = Combination::from(rhs) + tmp; let lhs = sx_cur_opening.mul(cs.namespace(|| "sx_cur_opening * xinvn"), &xinvn)?; let lhs = lhs.mul(cs.namespace(|| "sx_cur_opening * xinvn * yn"), &yn)?; // Computes x + x^2 + x^3 + ... 
+ x^n fn compute_thing<F: Field, CS: ConstraintSystem<F>>( mut cs: CS, x: AllocatedNum<F>, k: usize, ) -> Result<Combination<F>, SynthesisError> { let mut acc = Combination::from(x); let mut cur = x.clone(); for _ in 0..k { let tmp = acc.mul( cs.namespace(|| format!("extend polynomial")), &Combination::from(cur), )?; cur = cur.mul(cs.namespace(|| "square cur"), &cur)?; acc = acc + tmp; } Ok(acc) } let thing = compute_thing(cs.namespace(|| "poly(xy, nk)"), xy, nk)?; let thing = thing + compute_thing(cs.namespace(|| "poly(x_invy, nk)"), x_invy, nk)?; let thing = thing.mul( cs.namespace(|| "(poly(xy, nk) + poly(x_invy, nk)) * xn"), &Combination::from(xn), )?; /* let lhs = lhs - &thing; let lhs = lhs + &(self.rxy_opening * &xyinvn31); let lhs = lhs * &(self.rx_opening * &xinvn31); let ky = self.ky_opening * &yn; let lhs = lhs - &ky; */ let tmp = rxy_opening.mul(cs.namespace(|| "rxy_opening * xyinvn31"), &xyinvn31)?; let lhs = Combination::from(lhs); let lhs = lhs + tmp; let lhs = lhs - thing; let tmp = rx_opening.mul(cs.namespace(|| "rx_opening * xinvn31"), &xinvn31)?; let lhs = lhs.mul( cs.namespace(|| "lhs * (rx_opening * xinvn31)"), &Combination::from(tmp), )?; let ky = ky_opening.mul(cs.namespace(|| "ky_opening * yn"), &yn)?; let lhs = Combination::from(lhs) - ky; let lhs = lhs.lc(&mut cs); let rhs = rhs.lc(&mut cs); cs.enforce_zero(lhs - &rhs); // Check gx_old_opening { let mut challenges_old = challenges_sq_old.clone(); for (i, c) in challenges_old.iter_mut().enumerate() { *c = c.sqrt(cs.namespace(|| format!("sqrt old challenge_sq {}", i)))?; } let mut challenges_old_inv = challenges_old.clone(); for (i, c) in challenges_old_inv.iter_mut().enumerate() { *c = c.invert(cs.namespace(|| format!("invert old challenge {}", i)))?; } let expected_gx_old_opening = self.compute_b( cs.namespace(|| "b_old(x)"), x, &challenges_old, &challenges_old_inv, )?; let lc = expected_gx_old_opening.lc(&mut cs); cs.enforce_zero(lc - gx_old_opening.get_variable()); } // Check the other `b` entries let mut challenges_new = challenges_sq_new.clone(); for (i, c) in challenges_new.iter_mut().enumerate() { *c = c.sqrt(cs.namespace(|| format!("sqrt new challenge_sq {}", i)))?; } let mut challenges_new_inv = challenges_new.clone(); for (i, c) in challenges_new_inv.iter_mut().enumerate() { *c = c.invert(cs.namespace(|| format!("invert new challenge {}", i)))?; } let expected_b_x = self.compute_b( cs.namespace(|| "b_new(x)"), x, &challenges_new, &challenges_new_inv, )?; let expected_b_xy = self.compute_b( cs.namespace(|| "b_new(xy)"), xy, &challenges_new, &challenges_new_inv, )?; let expected_b_y_old = self.compute_b( cs.namespace(|| "b_new(y_old)"), y_old, &challenges_new, &challenges_new_inv, )?; let expected_b_y_cur = self.compute_b( cs.namespace(|| "b_new(y_cur)"), y_cur, &challenges_new, &challenges_new_inv, )?; let expected_b_y_new = self.compute_b( cs.namespace(|| "b_new(y_new)"), y_new, &challenges_new, &challenges_new_inv, )?; let lc = expected_b_x.lc(&mut cs); cs.enforce_zero(lc - b_x.get_variable()); let lc = expected_b_xy.lc(&mut cs); cs.enforce_zero(lc - b_xy.get_variable()); let lc = expected_b_y_old.lc(&mut cs); cs.enforce_zero(lc - b_y_old.get_variable()); let lc = expected_b_y_cur.lc(&mut cs); cs.enforce_zero(lc - b_y_cur.get_variable()); let lc = expected_b_y_new.lc(&mut cs); cs.enforce_zero(lc - b_y_new.get_variable()); Ok(()) } fn compute_b<CS: ConstraintSystem<E1::Scalar>>( &self, mut cs: CS, x: AllocatedNum<E1::Scalar>, challenges: &[AllocatedNum<E1::Scalar>], challenges_inv: 
&[AllocatedNum<E1::Scalar>], ) -> Result<Combination<E1::Scalar>, SynthesisError> { assert!(challenges.len() >= 1); assert_eq!(challenges.len(), challenges_inv.len()); Ok(if challenges.len() == 1 { // return *challenges_inv.last().unwrap() + *challenges.last().unwrap() * x; let tmp = x.mul( cs.namespace(|| "x * challenges[-1]"), challenges.last().unwrap(), )?; Combination::from(*challenges_inv.last().unwrap()) + tmp } else { // return (*challenges_inv.last().unwrap() + *challenges.last().unwrap() * x) // * compute_b( // x.square(), // &challenges[0..(challenges.len() - 1)], // &challenges_inv[0..(challenges.len() - 1)], // ); let tmp = x.mul( cs.namespace(|| "x * challenges[-1]"), challenges.last().unwrap(), )?; let tmp = Combination::from(*challenges_inv.last().unwrap()) + tmp; let x2 = x.mul(cs.namespace(|| "x^2"), &x)?; Combination::from( self.compute_b( cs.namespace(|| format!("b layer {}", challenges.len() - 1)), x2, &challenges[0..(challenges.len() - 1)], &challenges_inv[0..(challenges.len() - 1)], )? .mul(cs, &tmp)?, ) }) } fn num_equal_unless_base_case<CS: ConstraintSystem<E1::Scalar>>( &self, mut cs: CS, base_case: AllocatedBit, lhs: &Combination<E1::Scalar>, rhs: &Combination<E1::Scalar>, ) -> Result<(), SynthesisError> { let not_basecase = base_case.get_value().map(|v| (!v).into()); // lhs - rhs * (1 - base_case) = 0 // if base_case is true, then 1 - base_case will be zero // if base_case is false, then lhs - rhs must be zero, and therefore they are equal let (a, b, c) = cs.multiply( || "num_equal_unless_base_case", || { let lhs = lhs.get_value().ok_or(SynthesisError::AssignmentMissing)?; let rhs = rhs.get_value().ok_or(SynthesisError::AssignmentMissing)?; let not_basecase = not_basecase.ok_or(SynthesisError::AssignmentMissing)?; Ok((lhs - &rhs, not_basecase, Field::zero())) }, )?; let lhs_lc = lhs.lc(&mut cs); let rhs_lc = rhs.lc(&mut cs); cs.enforce_zero(LinearCombination::from(a) - &lhs_lc + &rhs_lc); cs.enforce_zero(LinearCombination::from(b) - CS::ONE + base_case.get_variable()); cs.enforce_zero(LinearCombination::from(c)); Ok(()) } fn equal_unless_base_case<CS: ConstraintSystem<E1::Scalar>>( &self, mut cs: CS, base_case: AllocatedBit, mut lhs: &[AllocatedBit], mut rhs: &[AllocatedBit], ) -> Result<(), SynthesisError> { assert_eq!(lhs.len(), rhs.len()); let mut i = 0; while lhs.len() > 0 { i += 1; let mut coeff = E1::Scalar::one(); let mut lhs_lc = Combination::zero(); let mut rhs_lc = Combination::zero(); let mut truncate_by = 0; for (i, (lhs, rhs)) in lhs.iter().zip(rhs.iter()).take(250).enumerate() { lhs_lc = lhs_lc + (Coeff::Full(coeff), Num::from(AllocatedNum::from(lhs.clone()))); rhs_lc = rhs_lc + (Coeff::Full(coeff), Num::from(AllocatedNum::from(rhs.clone()))); coeff = coeff + &coeff; truncate_by = i + 1; } self.num_equal_unless_base_case( cs.namespace(|| format!("check {}", i)), base_case.clone(), &lhs_lc, &rhs_lc )?; lhs = &lhs[truncate_by..]; rhs = &rhs[truncate_by..]; } // let not_basecase = base_case.get_value().map(|v| (!v).into()); // for (lhs, rhs) in lhs.iter().zip(rhs.iter()) { // // lhs - rhs * (1 - base_case) = 0 // // if base_case is true, then 1 - base_case will be zero // // if base_case is false, then lhs - rhs must be zero, and therefore they are equal // let (a, b, c) = cs.multiply( // || "equal_unless_base_case", // || { // let lhs = lhs.get_value().ok_or(SynthesisError::AssignmentMissing)?; // let rhs = rhs.get_value().ok_or(SynthesisError::AssignmentMissing)?; // let not_basecase = not_basecase.ok_or(SynthesisError::AssignmentMissing)?; // let 
lhs: E1::Scalar = lhs.into(); // let rhs: E1::Scalar = rhs.into(); // Ok((lhs - &rhs, not_basecase, Field::zero())) // }, // )?; // cs.enforce_zero(LinearCombination::from(a) - lhs.get_variable() + rhs.get_variable()); // cs.enforce_zero(LinearCombination::from(b) - CS::ONE + base_case.get_variable()); // cs.enforce_zero(LinearCombination::from(c)) // } Ok(()) } fn obtain_scalar_from_bits<CS: ConstraintSystem<E1::Scalar>>( &self, mut cs: CS, bits: &[AllocatedBit], ) -> Result<AllocatedNum<E1::Scalar>, SynthesisError> { let mut value = Some(E1::Scalar::zero()); let mut cur = E1::Scalar::one(); let mut lc = LinearCombination::zero(); for bit in bits { if let Some(bit) = bit.get_value() { if bit { value = value.map(|value| value + &cur); } } lc = lc + (Coeff::Full(cur), bit.get_variable()); cur = cur + &cur; } let newnum = AllocatedNum::alloc(cs.namespace(|| "scalar"), || { value.ok_or(SynthesisError::AssignmentMissing) })?; cs.enforce_zero(lc - newnum.get_variable()); Ok(newnum) } fn witness_bits_from_fe<F: Field, CS: ConstraintSystem<E1::Scalar>>( &self, mut cs: CS, value: F, ) -> Result<Vec<AllocatedBit>, SynthesisError> { let mut tmp = Vec::with_capacity(256); let bytes = value.to_bytes(); for byte in &bytes[0..] { for i in 0..8 { let bit = ((*byte >> i) & 1) == 1; tmp.push(bit); } } let mut res = Vec::with_capacity(256); for (i, b) in tmp.into_iter().enumerate() { res.push(AllocatedBit::alloc( cs.namespace(|| format!("bit {}", i)), || Ok(b), )?); } Ok(res) } fn verify_proof<CS: ConstraintSystem<E1::Scalar>>( &self, mut cs: CS, base_case: AllocatedBit, k_commitment: &CurvePoint<E2>, old_leftovers: &[AllocatedBit], new_deferred: &[AllocatedBit], new_leftovers: &[AllocatedBit], ) -> Result<(), SynthesisError> { let mut transcript = RescueGadget::new(cs.namespace(|| "init Rescue"))?; let transcript = &mut transcript; // Commitments self.commit_point( cs.namespace(|| "commit k_commitment"), transcript, &k_commitment, )?; let r_commitment = CurvePoint::witness(cs.namespace(|| "witness r_commitment"), || { Ok(self .proof .map(|proof| proof.proof.r_commitment) .unwrap_or(E2::zero())) })?; self.commit_point( cs.namespace(|| "commit r_commitment"), transcript, &r_commitment, )?; let y_cur = self.get_challenge(cs.namespace(|| "y_cur challenge"), transcript)?; let s_cur_commitment = CurvePoint::witness(cs.namespace(|| "witness s_cur_commitment"), || { Ok(self .proof .map(|proof| proof.proof.s_cur_commitment) .unwrap_or(E2::zero())) })?; self.commit_point( cs.namespace(|| "commit s_cur_commitment"), transcript, &s_cur_commitment, )?; let t_positive_commitment = CurvePoint::witness(cs.namespace(|| "witness t_positive_commitment"), || { Ok(self .proof .map(|proof| proof.proof.t_positive_commitment) .unwrap_or(E2::zero())) })?; self.commit_point( cs.namespace(|| "commit t_positive_commitment"), transcript, &t_positive_commitment, )?; let t_negative_commitment = CurvePoint::witness(cs.namespace(|| "witness t_negative_commitment"), || { Ok(self .proof .map(|proof| proof.proof.t_negative_commitment) .unwrap_or(E2::zero())) })?; self.commit_point( cs.namespace(|| "commit t_negative_commitment"), transcript, &t_negative_commitment, )?; let x = self.get_challenge(cs.namespace(|| "x challenge"), transcript)?; let c_commitment = CurvePoint::witness(cs.namespace(|| "witness c_commitment"), || { Ok(self .proof .map(|proof| proof.proof.c_commitment) .unwrap_or(E2::zero())) })?; self.commit_point( cs.namespace(|| "commit c_commitment"), transcript, &c_commitment, )?; let y_new = self.get_challenge(cs.namespace(|| 
"y_new challenge"), transcript)?; let s_new_commitment = CurvePoint::witness(cs.namespace(|| "witness s_new_commitment"), || { Ok(self .proof .map(|proof| proof.proof.s_new_commitment) .unwrap_or(E2::zero())) })?; self.commit_point( cs.namespace(|| "commit s_new_commitment"), transcript, &s_new_commitment, )?; // // Openings let g = { let (x, y) = E2::one().get_xy().unwrap(); CurvePoint::<E2>::constant(x, y) }; let ky_opening_pt = g.multiply( cs.namespace(|| "ky_opening_pt"), &new_deferred[128 * 4..128 * 4 + 256], )?; self.commit_point( cs.namespace(|| "commit ky_opening_pt"), transcript, &ky_opening_pt, )?; let rx_opening_pt = g.multiply( cs.namespace(|| "rx_opening_pt"), &new_deferred[128 * 4 + 256 * 4..128 * 4 + 256 * 4 + 256], )?; self.commit_point( cs.namespace(|| "commit rx_opening_pt"), transcript, &rx_opening_pt, )?; let rxy_opening_pt = g.multiply( cs.namespace(|| "rxy_opening_pt"), &new_deferred[128 * 4 + 256 * 5..128 * 4 + 256 * 5 + 256], )?; self.commit_point( cs.namespace(|| "commit rxy_opening_pt"), transcript, &rxy_opening_pt, )?; let sx_old_opening_pt = CurvePoint::witness(cs.namespace(|| "witness sx_old_opening_pt"), || { Ok(self .proof .map(|proof| E2::one() * &proof.proof.sx_old_opening) .unwrap_or(E2::zero())) })?; self.commit_point( cs.namespace(|| "commit sx_old_opening_pt"), transcript, &sx_old_opening_pt, )?; let sx_cur_opening_pt = g.multiply( cs.namespace(|| "sx_cur_opening_pt"), &new_deferred[128 * 4 + 256 * 3..128 * 4 + 256 * 3 + 256], )?; self.commit_point( cs.namespace(|| "commit sx_cur_opening_pt"), transcript, &sx_cur_opening_pt, )?; let tx_positive_opening_pt = g.multiply( cs.namespace(|| "tx_positive_opening_pt"), &new_deferred[128 * 4 + 256 * 1..128 * 4 + 256 * 1 + 256], )?; self.commit_point( cs.namespace(|| "commit tx_positive_opening_pt"), transcript, &tx_positive_opening_pt, )?; let tx_negative_opening_pt = g.multiply( cs.namespace(|| "tx_negative_opening_pt"), &new_deferred[128 * 4 + 256 * 2..128 * 4 + 256 * 2 + 256], )?; self.commit_point( cs.namespace(|| "commit tx_negative_opening_pt"), transcript, &tx_negative_opening_pt, )?; let sx_new_opening_pt = CurvePoint::witness(cs.namespace(|| "witness sx_new_opening_pt"), || { Ok(self .proof .map(|proof| E2::one() * &proof.proof.sx_new_opening) .unwrap_or(E2::zero())) })?; self.commit_point( cs.namespace(|| "commit sx_new_opening_pt"), transcript, &sx_new_opening_pt, )?; let gx_old_opening_pt = g.multiply( cs.namespace(|| "gx_old_opening_pt"), &new_deferred[256 * 6 + (4 + self.params.k) * 128..256 * 7 + (4 + self.params.k) * 128], )?; self.commit_point( cs.namespace(|| "commit gx_old_opening_pt"), transcript, &gx_old_opening_pt, )?; let z = self.get_challenge(cs.namespace(|| "z challenge"), transcript)?; // old_leftovers let s_old_commitment = CurvePoint::witness(cs.namespace(|| "witness s_old_commitment"), || { Ok(self .proof .map(|proof| proof.oldproof1.s_new_commitment) .unwrap_or(E2::zero())) })?; { let mut cs = cs.namespace(|| format!("s_old_commitment")); let (x, y) = s_old_commitment.get_xy(); let x = unpack_fe(cs.namespace(|| "unpack x"), &x)?; let y = unpack_fe(cs.namespace(|| "unpack y"), &y)?; self.equal_unless_base_case( cs.namespace(|| "x"), base_case.clone(), &x, &old_leftovers[0..256], )?; self.equal_unless_base_case( cs.namespace(|| "y"), base_case.clone(), &y, &old_leftovers[256..512], )?; } let g_old = CurvePoint::witness(cs.namespace(|| "witness g_old"), || { Ok(self .proof .map(|proof| proof.oldproof1.g_new) .unwrap_or(E2::zero())) })?; { let mut cs = cs.namespace(|| 
format!("g_old")); let (x, y) = g_old.get_xy(); let x = unpack_fe(cs.namespace(|| "unpack x"), &x)?; let y = unpack_fe(cs.namespace(|| "unpack y"), &y)?; self.equal_unless_base_case( cs.namespace(|| "x"), base_case.clone(), &x, &old_leftovers[256 * 2 + 128..256 * 3 + 128], )?; self.equal_unless_base_case( cs.namespace(|| "y"), base_case.clone(), &y, &old_leftovers[256 * 3 + 128..256 * 4 + 128], )?; } let p_commitment = { let mut cs = cs.namespace(|| "p_commitment"); /* let p_commitment = self.r_commitment; let p_commitment = p_commitment * &z + leftovers.s_new_commitment; let p_commitment = p_commitment * &z + self.s_cur_commitment; let p_commitment = p_commitment * &z + self.t_positive_commitment; let p_commitment = p_commitment * &z + self.t_negative_commitment; let p_commitment = p_commitment * &z + self.s_new_commitment; let p_commitment = p_commitment * &z + leftovers.g_new; */ let p_commitment = r_commitment.clone(); let p_commitment = p_commitment.multiply_endo(cs.namespace(|| "mul z 1"), &z)?; let p_commitment = p_commitment.add(cs.namespace(|| "add s_old_commitment"), &s_old_commitment)?; let p_commitment = p_commitment.multiply_endo(cs.namespace(|| "mul z 2"), &z)?; let p_commitment = p_commitment.add(cs.namespace(|| "add s_cur_commitment"), &s_cur_commitment)?; let p_commitment = p_commitment.multiply_endo(cs.namespace(|| "mul z 3"), &z)?; let p_commitment = p_commitment.add( cs.namespace(|| "add t_positive_commitment"), &t_positive_commitment, )?; let p_commitment = p_commitment.multiply_endo(cs.namespace(|| "mul z 4"), &z)?; let p_commitment = p_commitment.add( cs.namespace(|| "add t_negative_commitment"), &t_negative_commitment, )?; let p_commitment = p_commitment.multiply_endo(cs.namespace(|| "mul z 5"), &z)?; let p_commitment = p_commitment.add(cs.namespace(|| "add s_new_commitment"), &s_new_commitment)?; let p_commitment = p_commitment.multiply_endo(cs.namespace(|| "mul z 6"), &z)?; p_commitment.add(cs.namespace(|| "add g_old"), &g_old)? }; let p_opening = { let mut cs = cs.namespace(|| "p_commitment"); /* let p_opening = self.rx_opening; let p_opening = p_opening * &z + &self.sx_old_opening; let p_opening = p_opening * &z + &self.sx_cur_opening; let p_opening = p_opening * &z + &self.tx_positive_opening; let p_opening = p_opening * &z + &self.tx_negative_opening; let p_opening = p_opening * &z + &self.sx_new_opening; let p_opening = p_opening * &z + &gx_old_opening; */ let p_opening = rx_opening_pt; let p_opening = p_opening.multiply_endo(cs.namespace(|| "mul z 1"), &z)?; let p_opening = p_opening.add(cs.namespace(|| "add sx_old_opening_pt"), &sx_old_opening_pt)?; let p_opening = p_opening.multiply_endo(cs.namespace(|| "mul z 2"), &z)?; let p_opening = p_opening.add(cs.namespace(|| "add sx_cur_opening_pt"), &sx_cur_opening_pt)?; let p_opening = p_opening.multiply_endo(cs.namespace(|| "mul z 3"), &z)?; let p_opening = p_opening.add( cs.namespace(|| "add tx_positive_opening_pt"), &tx_positive_opening_pt, )?; let p_opening = p_opening.multiply_endo(cs.namespace(|| "mul z 4"), &z)?; let p_opening = p_opening.add( cs.namespace(|| "add tx_negative_opening_pt"), &tx_negative_opening_pt, )?; let p_opening = p_opening.multiply_endo(cs.namespace(|| "mul z 5"), &z)?; let p_opening = p_opening.add(cs.namespace(|| "add sx_new_opening_pt"), &sx_new_opening_pt)?; let p_opening = p_opening.multiply_endo(cs.namespace(|| "mul z 6"), &z)?; p_opening.add(cs.namespace(|| "add gx_old_opening_pt"), &gx_old_opening_pt)? 
}; let q_commitment = { let mut cs = cs.namespace(|| "q_commitment"); /* let q_commitment = self.c_commitment + (k_commitment * &z); let qy_opening = self.sx_cur_opening + &(ky_opening * &z); */ let q_commitment = k_commitment.multiply_endo(cs.namespace(|| "mul z 1"), &z)?; q_commitment.add(cs.namespace(|| "add c_commitment"), &c_commitment)? }; let qy_opening = { let mut cs = cs.namespace(|| "qy_opening"); let qy_opening = ky_opening_pt.multiply_endo(cs.namespace(|| "mul z 2"), &z)?; qy_opening.add(cs.namespace(|| "add sx_cur_opening_pt"), &sx_cur_opening_pt)? }; // 7 * 256 + (4 + 2k) * 128 let b = &[ &new_deferred[256 * 7 + (4 + 2 * self.params.k) * 128 ..256 * 7 + (4 + 2 * self.params.k) * 128 + 256], &new_deferred[256 * 8 + (4 + 2 * self.params.k) * 128 ..256 * 8 + (4 + 2 * self.params.k) * 128 + 256], &new_deferred[256 * 9 + (4 + 2 * self.params.k) * 128 ..256 * 9 + (4 + 2 * self.params.k) * 128 + 256], &new_deferred[256 * 10 + (4 + 2 * self.params.k) * 128 ..256 * 10 + (4 + 2 * self.params.k) * 128 + 256], &new_deferred[256 * 11 + (4 + 2 * self.params.k) * 128 ..256 * 11 + (4 + 2 * self.params.k) * 128 + 256], ]; let (g_new, challenges_sq_packed_new) = self.verify_inner_product( cs.namespace(|| "inner product"), &base_case, transcript, &[ p_commitment, r_commitment, c_commitment.clone(), q_commitment, c_commitment, ], &[ p_opening, rxy_opening_pt, sx_old_opening_pt, qy_opening, sx_new_opening_pt, ], b, )?; // new_leftovers { let mut cs = cs.namespace(|| format!("s_new_commitment")); let (x, y) = s_new_commitment.get_xy(); let x = unpack_fe(cs.namespace(|| "unpack x"), &x)?; let y = unpack_fe(cs.namespace(|| "unpack y"), &y)?; self.equal_unless_base_case( cs.namespace(|| "x"), base_case.clone(), &x, &new_leftovers[0..256], )?; self.equal_unless_base_case( cs.namespace(|| "y"), base_case.clone(), &y, &new_leftovers[256..512], )?; } { self.equal_unless_base_case( cs.namespace(|| "y_new in new_leftovers"), base_case.clone(), &y_new, &new_leftovers[512..512 + 128], )?; } { let mut cs = cs.namespace(|| format!("g_new")); let (x, y) = g_new.get_xy(); let x = unpack_fe(cs.namespace(|| "unpack x"), &x)?; let y = unpack_fe(cs.namespace(|| "unpack y"), &y)?; self.equal_unless_base_case( cs.namespace(|| "x"), base_case.clone(), &x, &new_leftovers[256 * 2 + 128..256 * 3 + 128], )?; self.equal_unless_base_case( cs.namespace(|| "y"), base_case.clone(), &y, &new_leftovers[256 * 3 + 128..256 * 4 + 128], )?; } for (i, challenge_sq_packed) in challenges_sq_packed_new.into_iter().enumerate() { self.equal_unless_base_case( cs.namespace(|| format!("challenge {} in new_leftovers", i)), base_case.clone(), &challenge_sq_packed, &new_leftovers[256 * 4 + 128 + 128 * i..256 * 4 + 128 + 128 * i + 128], )?; // 4 * 128 + 6 * 256 + k * 128 + 256 is the start self.equal_unless_base_case( cs.namespace(|| format!("challenge {} in new_deferred", i)), base_case.clone(), &challenge_sq_packed, &new_deferred[(4 * 128 + 6 * 256 + self.params.k * 128 + 256) + i * 128 ..(4 * 128 + 6 * 256 + self.params.k * 128 + 256) + i * 128 + 128], )?; } // x (deferred) { self.equal_unless_base_case( cs.namespace(|| "challenge x in new_deferred"), base_case.clone(), &x, &new_deferred[0..128], )?; } // y_cur (deferred) { self.equal_unless_base_case( cs.namespace(|| "challenge y_cur in new_deferred"), base_case.clone(), &y_cur, &new_deferred[128 * 2..128 * 2 + 128], )?; } // y_new (deferred) { self.equal_unless_base_case( cs.namespace(|| "challenge y_new in new_deferred"), base_case.clone(), &y_new, &new_deferred[128 * 3..128 * 3 + 128], )?; 
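        // Recap of the `new_deferred` bit layout implied by the slice offsets
        // used throughout this function (128-bit entries are packed
        // challenges, 256-bit entries are full field elements). This is a
        // reconstruction from the indices above, not an authoritative spec:
        //
        //   [0..128)          x challenge
        //   [128..256)        y_old challenge
        //   [256..384)        y_cur challenge
        //   [384..512)        y_new challenge
        //   [512..512+6*256)  openings: ky, tx_positive, tx_negative,
        //                     sx_cur, rx, rxy (256 bits each)
        //   then k packed old challenges (128 bits each), the 256-bit
        //   gx_old opening, k packed new challenges, and five 256-bit
        //   b evaluations,
        // totalling 12*256 + (4 + 2k)*128 bits -- the size allocated for
        // `deferred` in `synthesize` below.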
} Ok(()) } fn verify_inner_product<CS: ConstraintSystem<E1::Scalar>>( &self, mut cs: CS, base_case: &AllocatedBit, transcript: &mut RescueGadget<E1::Scalar>, commitments: &[CurvePoint<E2>], openings: &[CurvePoint<E2>], b: &[&[AllocatedBit]], ) -> Result<(CurvePoint<E2>, Vec<Vec<AllocatedBit>>), SynthesisError> { assert_eq!(commitments.len(), openings.len()); let mut challenges_sq_packed = vec![]; let mut p = commitments.to_vec(); let mut v = openings.to_vec(); for i in 0..self.params.k { let mut cs = cs.namespace(|| format!("round {}", i)); let mut tmp = vec![]; for j in 0..commitments.len() { let L = CurvePoint::witness(cs.namespace(|| format!("witness L_{}", j)), || { Ok(self .proof .map(|proof| proof.proof.inner_product.rounds[i].L[j]) .unwrap_or(E2::zero())) })?; let R = CurvePoint::witness(cs.namespace(|| format!("witness R_{}", j)), || { Ok(self .proof .map(|proof| proof.proof.inner_product.rounds[i].R[j]) .unwrap_or(E2::zero())) })?; let l = CurvePoint::witness(cs.namespace(|| format!("witness l_{}", j)), || { Ok(self .proof .map(|proof| E2::one() * &proof.proof.inner_product.rounds[i].l[j]) .unwrap_or(E2::zero())) })?; let r = CurvePoint::witness(cs.namespace(|| format!("witness r_{}", j)), || { Ok(self .proof .map(|proof| E2::one() * &proof.proof.inner_product.rounds[i].r[j]) .unwrap_or(E2::zero())) })?; self.commit_point(cs.namespace(|| format!("commit L_{}", j)), transcript, &L)?; self.commit_point(cs.namespace(|| format!("commit R_{}", j)), transcript, &R)?; self.commit_point(cs.namespace(|| format!("commit l_{}", j)), transcript, &l)?; self.commit_point(cs.namespace(|| format!("commit r_{}", j)), transcript, &r)?; tmp.push((L, R, l, r)); } let forkvalue = AllocatedNum::alloc(cs.namespace(|| format!("fork for challenge {}", i)), || { let val = self.forkvalues.ok_or(SynthesisError::AssignmentMissing)?[i]; let fe = Field::from_u128(val as u128); Ok(fe) })?; transcript.absorb( cs.namespace(|| format!("transcript absorb fork value {}", i)), Num::from(forkvalue), )?; let challenge_sq_packed = self.get_challenge( cs.namespace(|| format!("round challenge {}", i)), transcript, )?; challenges_sq_packed.push(challenge_sq_packed.clone()); for (j, tmp) in tmp.into_iter().enumerate() { let L = tmp.0.multiply_endo( cs.namespace(|| format!("[challenge^2] L_{}", j)), &challenge_sq_packed, )?; let R = tmp.1.multiply_inv_endo( cs.namespace(|| format!("[challenge^-2] R_{}", j)), &challenge_sq_packed, )?; let l = tmp.2.multiply_endo( cs.namespace(|| format!("[challenge^2] l_{}", j)), &challenge_sq_packed, )?; let r = tmp.3.multiply_inv_endo( cs.namespace(|| format!("[challenge^-2] r_{}", j)), &challenge_sq_packed, )?; p[j] = p[j].add(cs.namespace(|| format!("p_{} + L_{}", j, j)), &L)?; p[j] = p[j].add(cs.namespace(|| format!("p_{} + R_{}", j, j)), &R)?; v[j] = v[j].add(cs.namespace(|| format!("p_{} + l_{}", j, j)), &l)?; v[j] = v[j].add(cs.namespace(|| format!("p_{} + r_{}", j, j)), &r)?; } } let g_new = CurvePoint::witness(cs.namespace(|| "witness G"), || { Ok(self .proof .map(|proof| proof.proof.inner_product.g) .unwrap_or(E2::zero())) })?; let g = { let (x, y) = E2::one().get_xy().unwrap(); CurvePoint::<E2>::constant(x, y) }; for j in 0..commitments.len() { let a = self.witness_bits_from_fe( cs.namespace(|| format!("witness a_{}", j)), self.proof .map(|proof| proof.proof.inner_product.a[j]) .unwrap_or(Field::zero()), )?; let (x1, y1) = p[j].get_xy(); let (x2, y2) = g_new .multiply(cs.namespace(|| format!("[a_{}] g_new", j)), &a)? 
.get_xy(); { let mut cs = cs.namespace(|| format!("p_{} == [a_{}] g_new", j, j)); self.num_equal_unless_base_case(cs.namespace(|| "x"), base_case.clone(), &Combination::from(x1), &Combination::from(x2))?; self.num_equal_unless_base_case(cs.namespace(|| "y"), base_case.clone(), &Combination::from(y1), &Combination::from(y2))?; } let (x1, y1) = v[j].get_xy(); let (x2, y2) = g .multiply(cs.namespace(|| "[a_{}] g"), &a)? .multiply(cs.namespace(|| format!("[a b_{}] g", j)), b[j])? .get_xy(); { let mut cs = cs.namespace(|| format!("v_{} == [a_{} b_{}] g", j, j, j)); self.num_equal_unless_base_case(cs.namespace(|| "x"), base_case.clone(), &Combination::from(x1), &Combination::from(x2))?; self.num_equal_unless_base_case(cs.namespace(|| "y"), base_case.clone(), &Combination::from(y1), &Combination::from(y2))?; } } Ok((g_new, challenges_sq_packed)) } fn commit_point<CS: ConstraintSystem<E1::Scalar>>( &self, mut cs: CS, transcript: &mut RescueGadget<E1::Scalar>, point: &CurvePoint<E2>, ) -> Result<(), SynthesisError> { let (x, y) = point.get_xy(); transcript.absorb(cs.namespace(|| "absorb x"), x)?; transcript.absorb(cs.namespace(|| "absorb y"), y)?; Ok(()) } fn get_challenge_scalar<CS: ConstraintSystem<E1::Scalar>>( &self, mut cs: CS, bits: &[AllocatedBit], ) -> Result<AllocatedNum<E1::Scalar>, SynthesisError> { assert_eq!(bits.len(), 128); let mut acc = Combination::from(Num::constant(E1::Scalar::one() + &E1::Scalar::one() + &E1::Scalar::one())); for i in 1..64 { let should_negate = &bits[i * 2]; let should_endo = &bits[i * 2 + 1]; // acc = acc + acc acc = acc.scale(E1::Scalar::from_u128(2)); // tmp = 1 - 2b // acc = acc + tmp acc = acc + Combination::from(AllocatedNum::one(&mut cs)); acc = acc + (Combination::from(AllocatedNum::from(should_negate.clone())) .scale(-E1::Scalar::from_u128(2))); // acc = (1 - b') acc + b' acc * beta // = acc - b' * acc + b' * acc * beta // = (b' * beta - b') * (acc) + acc let mut outval = None; let (a, b, c) = cs.multiply( || format!("should_endo round {}", i), || { let acc = acc.get_value().ok_or(SynthesisError::AssignmentMissing)?; let should_endo = should_endo .get_value() .ok_or(SynthesisError::AssignmentMissing)?; let should_endo = if should_endo { E1::Scalar::one() } else { E1::Scalar::zero() }; let beta = E1::Scalar::BETA; let lhs = should_endo * &beta - &should_endo; let rhs = acc; let out = lhs * &rhs; outval = Some(out); Ok((lhs, rhs, out)) }, )?; cs.enforce_zero( LinearCombination::from(a) + should_endo.get_variable() - (Coeff::Full(E1::Scalar::BETA), should_endo.get_variable()), ); let acclc = acc.lc(&mut cs); cs.enforce_zero(LinearCombination::from(b) - &acclc); acc = acc + Combination::from(Num::from(AllocatedNum::from_raw_unchecked(outval, c))); } let newacc = AllocatedNum::alloc(cs.namespace(|| "final acc value"), || { acc.get_value().ok_or(SynthesisError::AssignmentMissing) })?; let acclc = acc.lc(&mut cs); cs.enforce_zero(LinearCombination::from(newacc.get_variable()) - &acclc); Ok(newacc) } fn get_challenge<CS: ConstraintSystem<E1::Scalar>>( &self, mut cs: CS, transcript: &mut RescueGadget<E1::Scalar>, ) -> Result<Vec<AllocatedBit>, SynthesisError> { let num = transcript.squeeze(cs.namespace(|| "squeeze"))?; let mut bits = unpack_fe(cs.namespace(|| "unpack"), &num.into())?; bits.truncate(127); bits.push(AllocatedBit::one(cs)); Ok(bits) } } impl<'a, E1: Curve, E2: Curve<Base = E1::Scalar>, Inner: RecursiveCircuit<E1::Scalar>> Circuit<E1::Scalar> for VerificationCircuit<'a, E1, E2, Inner> { fn synthesize<CS: ConstraintSystem<E1::Scalar>>( &self, cs: 
&mut CS, ) -> Result<(), SynthesisError> { let mut payload_bits = vec![]; { let mut cs = cs.namespace(|| "new_payload"); for (j, byte) in self.new_payload.into_iter().enumerate() { for i in 0..8 { let bit = (*byte >> i) & 1 == 1; payload_bits.push(AllocatedBit::alloc_input_unchecked( cs.namespace(|| format!("bit {}", 8 * j + i)), || Ok(bit), )?); } } } let mut leftovers1 = vec![]; { let mut cs = cs.namespace(|| "old_leftovers"); if let Some(l) = &self.old_leftovers { let bytes = l.to_bytes(); for (j, byte) in bytes.into_iter().enumerate() { for i in 0..8 { let bit = (byte >> i) & 1 == 1; leftovers1.push(AllocatedBit::alloc_input_unchecked( cs.namespace(|| format!("bit {}", 8 * j + i)), || Ok(bit), )?); } } } else { // (256 * 2) + 128 + (256 * 2) + (128 * k) // = 256 * 4 + 128 * (k + 1) let num_bits = 256 * 4 + 128 * (self.params.k + 1); for i in 0..num_bits { leftovers1.push(AllocatedBit::alloc_input_unchecked( cs.namespace(|| format!("bit {}", i)), || Ok(false), )?); } } } let mut leftovers2 = vec![]; { let mut cs = cs.namespace(|| "new_leftovers"); if let Some(l) = &self.new_leftovers { let bytes = l.to_bytes(); for (j, byte) in bytes.into_iter().enumerate() { for i in 0..8 { let bit = (byte >> i) & 1 == 1; leftovers2.push(AllocatedBit::alloc_input_unchecked( cs.namespace(|| format!("bit {}", 8 * j + i)), || Ok(bit), )?); } } } else { // (256 * 2) + 128 + (256 * 2) + (128 * k) // = 256 * 4 + 128 * (k + 1) let num_bits = 256 * 4 + 128 * (self.params.k + 1); for i in 0..num_bits { leftovers2.push(AllocatedBit::alloc_input_unchecked( cs.namespace(|| format!("bit {}", i)), || Ok(false), )?); } } } let mut deferred = vec![]; { let mut cs = cs.namespace(|| "deferred"); if let Some(l) = &self.deferred { let bytes = l.to_bytes(); for (j, byte) in bytes.into_iter().enumerate() { for i in 0..8 { let bit = (byte >> i) & 1 == 1; deferred.push(AllocatedBit::alloc_input_unchecked( cs.namespace(|| format!("bit {}", 8 * j + i)), || Ok(bit), )?); } } } else { // 12 * 256 + (4 + 2k) * 128 let num_bits = 12 * 256 + (4 + 2 * self.params.k) * 128; for i in 0..num_bits { deferred.push(AllocatedBit::alloc_input_unchecked( cs.namespace(|| format!("deferred bit {}", i)), || Ok(false), )?); } } } // Check that all the inputs are booleans now that we've allocated // all of our public inputs. { let mut cs = cs.namespace(|| "constrain new_payload"); for (i, b) in payload_bits.iter().enumerate() { b.check(cs.namespace(|| format!("bit {}", i)))?; } } { let mut cs = cs.namespace(|| "constrain old_leftovers"); for (i, b) in leftovers1.iter().enumerate() { b.check(cs.namespace(|| format!("bit {}", i)))?; } } { let mut cs = cs.namespace(|| "constrain new_leftovers"); for (i, b) in leftovers2.iter().enumerate() { b.check(cs.namespace(|| format!("bit {}", i)))?; } } { let mut cs = cs.namespace(|| "constrain deferred"); for (i, b) in deferred.iter().enumerate() { b.check(cs.namespace(|| format!("bit {}", i)))?; } } // Is this the base case? 
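        // (In the first step of the recursion there is no previous proof to
        // verify, so this bit is witnessed freely and every check on prior
        // state is made conditional via `equal_unless_base_case`, which
        // appears to degenerate to a tautology when the bit is set. The
        // leftovers blobs above are 256*4 + 128*(k+1) bits each: two curve
        // points at 2*256 bits apiece, one packed 128-bit challenge, and k
        // packed 128-bit round challenges.)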
let base_case = AllocatedBit::alloc(cs.namespace(|| "is base case"), || { self.base_case.ok_or(SynthesisError::AssignmentMissing) })?; // Compute k(Y) commitment let mut k_commitment = CurvePoint::<E2>::constant( self.params.generators_xy[1].0, self.params.generators_xy[1].1, ); // Attach payload for old proof let mut old_payload = vec![]; if let Some(proof) = &self.proof { let mut cs = cs.namespace(|| "old_payload"); for (j, byte) in proof.payload.iter().enumerate() { for i in 0..8 { let bit = ((*byte >> i) & 1) == 1; old_payload.push(AllocatedBit::alloc( cs.namespace(|| format!("bit {}", 8 * j + i)), || Ok(bit), )?); } } } else { let mut cs = cs.namespace(|| "base_payload"); for (i, bit) in self.inner_circuit.base_payload().into_iter().enumerate() { old_payload.push(AllocatedBit::alloc( cs.namespace(|| format!("bit {}", i)), || Ok(bit), )?); } } let basecase_val = base_case.get_value().map(|v| v.into()); { let mut cs = cs.namespace(|| "constrain old_payload if base case"); for (i, (bit, old_payload_bit)) in self .inner_circuit .base_payload() .into_iter() .zip(old_payload.iter()) .enumerate() { let (a, b, c) = cs.multiply( || format!("(bit_{} - old_payload_bit_{}) * base_case = 0", i, i), || { let old_payload_bit = old_payload_bit .get_value() .ok_or(SynthesisError::AssignmentMissing)?; let basecase_val = basecase_val.ok_or(SynthesisError::AssignmentMissing)?; let lhs: E1::Scalar = bit.into(); let rhs: E1::Scalar = old_payload_bit.into(); Ok((lhs - &rhs, basecase_val, Field::zero())) }, )?; if bit { cs.enforce_zero( LinearCombination::from(a) - CS::ONE + old_payload_bit.get_variable(), ); } else { cs.enforce_zero(LinearCombination::from(a) + old_payload_bit.get_variable()); } cs.enforce_zero(LinearCombination::from(b) - base_case.get_variable()); cs.enforce_zero(LinearCombination::from(c)); } } let mut old_leftovers1 = vec![]; { let mut cs = cs.namespace(|| "old_proof"); if let Some(l) = &self.proof { let l = &l.oldproof1; let bytes = l.to_bytes(); for (j, byte) in bytes.into_iter().enumerate() { for i in 0..8 { let bit = (byte >> i) & 1 == 1; old_leftovers1.push(AllocatedBit::alloc( cs.namespace(|| format!("bit {}", 8 * j + i)), || Ok(bit), )?); } } } else { // (256 * 2) + 128 + (256 * 2) + (128 * k) // = 256 * 4 + 128 * (k + 1) let num_bits = 256 * 4 + 128 * (self.params.k + 1); for i in 0..num_bits { old_leftovers1.push(AllocatedBit::alloc( cs.namespace(|| format!("bit {}", i)), || Ok(false), )?); } } } let mut old_deferred = vec![]; { let mut cs = cs.namespace(|| "old_deferred"); if let Some(l) = &self.proof { let l = &l.deferred; let bytes = l.to_bytes(); for (j, byte) in bytes.into_iter().enumerate() { for i in 0..8 { let bit = (byte >> i) & 1 == 1; old_deferred.push(AllocatedBit::alloc( cs.namespace(|| format!("bit {}", 8 * j + i)), || Ok(bit), )?); } } } else { let dummy_deferred = Deferred::<E1::Scalar>::dummy(self.params.k); let bytes = dummy_deferred.to_bytes(); for (_, byte) in bytes.into_iter().enumerate() { for i in 0..8 { let bit = (byte >> i) & 1 == 1; old_deferred.push(AllocatedBit::alloc( cs.namespace(|| format!("bit {}", i)), || Ok(bit), )?); } } } } assert_eq!(old_deferred.len(), deferred.len()); let mut bits_for_k_commitment = vec![]; bits_for_k_commitment.extend(old_payload.clone()); bits_for_k_commitment.extend(old_leftovers1.clone()); bits_for_k_commitment.extend(leftovers1); bits_for_k_commitment.extend(old_deferred.clone()); { let mut cs = cs.namespace(|| "k_commitment"); for (i, (bit, gen)) in bits_for_k_commitment .into_iter() 
                .zip(self.params.generators_xy[2..].iter())
                .enumerate()
            {
                let gen = CurvePoint::constant(gen.0, gen.1);
                k_commitment = k_commitment.add_conditionally_incomplete(
                    cs.namespace(|| format!("bit {}", i)),
                    &gen,
                    &Boolean::from(bit.clone()),
                )?;
            }
        }

        // println!("k inside circuit: {:?}", k_commitment);

        self.verify_deferred(cs.namespace(|| "verify deferred"), &old_deferred)?;
        self.verify_proof(
            cs.namespace(|| "verify proof"),
            base_case.clone(),
            &k_commitment,
            &old_leftovers1,
            &deferred,
            &leftovers2,
        )?;

        // deferred old challenges should be the same
        self.equal_unless_base_case(
            cs.namespace(|| "deferred[challenges] == old_leftovers1[challenges]"),
            base_case.clone(),
            &deferred[256 * 8..256 * 8 + 128 * self.params.k],
            &old_leftovers1[256 * 4 + 128..],
        )?;

        // deferred y_old should be the same
        self.equal_unless_base_case(
            cs.namespace(|| "deferred[y_old] == old_leftovers1[y_old]"),
            base_case.clone(),
            &deferred[128 * 1..128 * 2],
            &old_leftovers1[256 * 2..256 * 2 + 128],
        )?;

        self.inner_circuit.synthesize(
            &mut cs.namespace(|| "inner circuit"),
            &old_payload,
            &payload_bits,
        )
    }
}
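// --- Standalone illustration (not part of the circuit above) ---
// A minimal sketch of the bit packing used by `witness_bits_from_fe` and
// `obtain_scalar_from_bits`: serialize to bytes, explode into bits
// least-significant-first, and recombine by summing doubled coefficients.
// It uses a plain u64 in place of a field element, so the names and types
// here are illustrative assumptions, not the crate's API.

fn main() {
    let value: u64 = 0xDEAD_BEEF;

    // Explode into little-endian bits, LSB of each byte first,
    // mirroring the double loop in `witness_bits_from_fe`.
    let mut bits = Vec::with_capacity(64);
    for byte in value.to_le_bytes() {
        for i in 0..8 {
            bits.push((byte >> i) & 1 == 1);
        }
    }

    // Recombine: `cur` doubles every iteration, just as `cur = cur + &cur`
    // doubles the coefficient in `obtain_scalar_from_bits`.
    let (mut acc, mut cur) = (0u64, 1u64);
    for &bit in &bits {
        if bit {
            acc = acc.wrapping_add(cur);
        }
        cur = cur.wrapping_add(cur);
    }

    assert_eq!(acc, value);
    println!("round-tripped {:#x} through {} bits", acc, bits.len());
}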
# Reads an n x m integer matrix and an m-vector from stdin,
# then prints their matrix-vector product, one entry per line.
n, m = map(int, input().split())

# The matrix: n rows of m space-separated integers.
matrix = []
for _ in range(n):
    matrix.append(list(map(int, input().split())))

# The vector: m integers, one per line.
column = [int(input()) for _ in range(m)]

# Each output line is the dot product of a matrix row with the vector.
for i in range(n):
    print(sum(a * b for a, b in zip(matrix[i], column)))
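# Worked example (hypothetical input): with n=2, m=3, matrix rows
# "1 2 3" and "4 5 6", and the vector values 7, 8, 9 entered one per
# line, the program prints the matrix-vector product:
#   1*7 + 2*8 + 3*9 = 50
#   4*7 + 5*8 + 6*9 = 122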
package nzcovid19cases

import (
	"encoding/json"
	"fmt"
	"strconv"
	"strings"

	"github.com/anaskhan96/soup"
)

type RawCase struct {
	ReportedDate     string
	Case             int
	Location         string
	Age              string
	Gender           string
	TravelRelated    string
	LastCityBeforeNZ string
	FlightNumber     string
	DepartureDate    string
	ArrivalDate      string
	CaseType         string
}

type CaseStatsResponse struct {
	ConfirmedCasesTotal  int
	ConfirmedCasesNew24h int
	ProbableCasesTotal   int
	ProbableCasesNew24h  int
	RecoveredCasesTotal  int
	RecoveredCasesNew24h int
	//HospitalisedCasesTotal  int
	//HospitalisedCasesNew24h int
	DeathCasesTotal  int
	DeathCasesNew24h int
}

func parseRow(cols []soup.Root) RawCase {
	var c RawCase
	c.ReportedDate = cols[0].Text()
	c.Gender = cols[1].Text()
	c.Age = cols[2].Text()
	c.Location = cols[3].Text()
	// Handle case with erroneous colspan
	_, sigh := cols[3].Attrs()["colspan"]
	if sigh {
		return c
	}
	c.TravelRelated = cols[4].Text()
	c.LastCityBeforeNZ = cols[5].Text()
	c.FlightNumber = cols[6].Text()
	c.ArrivalDate = cols[7].Text()
	c.DepartureDate = cols[8].Text()
	return c
}

const (
	CSVRenderType  = "csv"
	JSONRenderType = "json"
)

func parseStat(stat soup.Root) (int, int, error) {
	tds := stat.FindAll("td")
	if len(tds) != 2 { //nolint:gomnd
		return 0, 0, fmt.Errorf("expected two columns")
	}
	numString := strings.ReplaceAll(tds[0].Text(), ",", "")
	num, err := strconv.Atoi(numString)
	if err != nil {
		return 0, 0, fmt.Errorf("failed to convert %v to number: %w", tds[0].Text(), err)
	}
	if strings.TrimSpace(tds[1].Text()) == "" {
		return num, 0, nil
	}
	num24hString := strings.ReplaceAll(tds[1].Text(), ",", "")
	num24h, err := strconv.Atoi(num24hString)
	if err != nil {
		return 0, 0, fmt.Errorf("failed to convert %v to number: %w", tds[1].Text(), err)
	}
	return num, num24h, nil
}

func ScrapeCases() ([]*RawCase, error) {
	resp, err := soup.Get("https://www.health.govt.nz/our-work/diseases-and-conditions/" +
		"covid-19-novel-coronavirus/covid-19-current-cases/covid-19-current-cases-details")
	if err != nil {
		return nil, err
	}
	doc := soup.HTMLParse(resp)
	tables := doc.FindAll("table", "class", "table-style-two")
	var offset = 0
	if len(tables) > 2 { //nolint:gomnd
		offset = 1
	}
	rows := tables[offset].FindAll("tr")
	var cases []*RawCase //nolint:prealloc
	// Note: slice starting at 1, skipping the header
	for i, row := range rows[1:] {
		cols := row.FindAll("td")
		if len(cols) < 8 { //nolint:gomnd
			return nil, fmt.Errorf("table 1 row has %v columns, not at least 8", len(cols))
		}
		c := parseRow(cols)
		c.CaseType = "confirmed"
		c.Case = len(rows) - i - 1
		cases = append(cases, &c)
	}
	rows = tables[offset+1].FindAll("tr")
	// Note: slice starting at 1, skipping the header
	for i, row := range rows[1:] {
		cols := row.FindAll("td")
		if len(cols) != 9 { //nolint:gomnd
			return nil, fmt.Errorf("table 2 row has %v columns, not 9", len(cols))
		}
		c := parseRow(cols)
		c.CaseType = "probable"
		c.Case = len(rows) - i - 1
		cases = append(cases, &c)
	}
	return cases, nil
}

func RenderCases(normCases []*NormalisedCase, viewType string) (string, error) {
	validViewTypes := map[string]bool{
		CSVRenderType:  true,
		JSONRenderType: true,
	}
	if !validViewTypes[viewType] {
		return "", InvalidUsageError{fmt.Sprintf("unknown view type: %v", viewType)}
	}
	var sb strings.Builder
	switch viewType {
	case CSVRenderType:
		sb.WriteString(
			`"CaseNumber",` +
				`"ReportedDate",` +
				`"LocationName",` +
				`"AgeValid",` +
				`"OlderOrEqualToAge",` +
				`"YoungerThanAge",` +
				`"Gender",` +
				`"IsTravelRelatedValid",` +
				`"IsTravelRelated",` +
				`"DepartureDateValid",` +
				`"DepartureDate",` +
				`"ArrivalDateValid",` +
				`"ArrivalDate",` +
				`"LastCityBeforeNZ",` +
				`"FlightNumber",` +
`"CaseType"`, ) sb.WriteRune('\n') for _, c := range normCases { sb.WriteString(fmt.Sprintf( `%v, "%v", "%v", "%v", "%v", "%v", %v, %v, "%v", "%v", "%v", "%v", "%v", "%v", "%v", "%v"`, c.CaseNumber, c.ReportedDate, c.LocationName, c.Age.Valid, c.Age.OlderOrEqualToAge, c.Age.YoungerThanAge, c.Gender, c.IsTravelRelated.Valid, c.IsTravelRelated.Value, c.DepartureDate.Valid, c.DepartureDate.Value, c.ArrivalDate.Valid, c.ArrivalDate.Value, c.LastCityBeforeNZ, c.FlightNumber, c.CaseType), ) sb.WriteRune('\n') } case JSONRenderType: b, err := json.MarshalIndent(normCases, "", " ") if err != nil { return "", err } sb.Write(b) } return sb.String(), nil } func ScrapeCaseStats() (CaseStatsResponse, error) { var cS CaseStatsResponse resp, err := soup.Get("https://www.health.govt.nz/our-work/diseases-and-conditions/" + "covid-19-novel-coronavirus/covid-19-current-cases") if err != nil { return cS, err } doc := soup.HTMLParse(resp) tables := doc.FindAll("table") stats := tables[0].FindAll("tr") if len(stats) != 8 { //nolint:gomnd return cS, fmt.Errorf("stats table has %v TR, not 8", len(stats)) } //for i, s := range stats { // a, b ,_ := parseStat(s) // fmt.Println(i,a,b) //} cS.ConfirmedCasesTotal, cS.ConfirmedCasesNew24h, err = parseStat(stats[1]) if err != nil { return cS, fmt.Errorf("problem parsing confirmed cases %w", err) } cS.ProbableCasesTotal, cS.ProbableCasesNew24h, err = parseStat(stats[2]) if err != nil { return cS, fmt.Errorf("problem parsing probable cases %w", err) } //cS.HospitalisedCasesTotal, cS.HospitalisedCasesNew24h, err = parseStat(stats[4]) //parseStatHosp(stats[4]) //if err != nil { // return cS, fmt.Errorf("problem parsing hospitalised cases %w", err) //} cS.RecoveredCasesTotal, cS.RecoveredCasesNew24h, err = parseStat(stats[4]) if err != nil { return cS, fmt.Errorf("problem parsing recovered cases %w", err) } cS.DeathCasesTotal, cS.DeathCasesNew24h, err = parseStat(stats[6]) if err != nil { return cS, fmt.Errorf("problem parsing dead cases %w", err) } return cS, nil } func RenderCaseStats(cS CaseStatsResponse, viewType string) (string, error) { var sb strings.Builder if viewType != JSONRenderType { return "", InvalidUsageError{fmt.Sprintf("unknown view type: %v", viewType)} } b, err := json.MarshalIndent(cS, "", " ") if err != nil { return "", err } sb.Write(b) return sb.String(), nil }
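// --- Example usage (separate file sketch, not part of the package) ---
// A minimal driver that exercises the scrapers above. The import path is
// an assumption -- point it at wherever this module actually lives.

package main

import (
	"fmt"
	"log"

	nz "github.com/example/nzcovid19cases" // hypothetical import path
)

func main() {
	// Fetch the headline statistics from the Ministry of Health page.
	stats, err := nz.ScrapeCaseStats()
	if err != nil {
		log.Fatalf("scraping stats: %v", err)
	}

	// Render them as indented JSON (the only supported view for stats).
	out, err := nz.RenderCaseStats(stats, nz.JSONRenderType)
	if err != nil {
		log.Fatalf("rendering stats: %v", err)
	}
	fmt.Println(out)
}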