content
stringlengths
10
4.9M
/**
 * Collects every pending update for this model into a freshly created list.
 *
 * @return a new mutable list containing the accumulated updates
 */
public List<Bson> toUpdates() {
    final List<Bson> collected = new ArrayList<>();
    appendUpdates(collected);
    return collected;
}
/**
 * Generates an HMAC digest of the key and data using the hash function
 * that corresponds to the given SASL authentication mechanism.
 *
 * @param auth_mech  selects the digest: SCRAM-SHA1/SHA256/SHA512
 * @param key        HMAC key bytes
 * @param keylen     length of key in bytes
 * @param data       message to authenticate
 * @param datalen    length of data in bytes
 * @param digest     output buffer for the computed digest
 * @param digestlen  receives the digest length in bytes
 * @return SASL_OK on success, SASL_FAIL if the HMAC computation fails,
 *         SASL_BADPARAM for an unknown mechanism
 */
static cbsasl_error_t HMAC_digest(cbsasl_auth_mechanism_t auth_mech,
                                  const unsigned char *key,
                                  unsigned int keylen,
                                  const unsigned char *data,
                                  unsigned int datalen,
                                  unsigned char *digest,
                                  unsigned int *digestlen)
{
    const EVP_MD *md;

    /* Map the mechanism onto the matching message digest once, so the
     * HMAC() invocation itself appears a single time instead of being
     * duplicated in every switch arm. */
    switch (auth_mech) {
    case SASL_AUTH_MECH_SCRAM_SHA1:
        md = EVP_sha1();
        break;
    case SASL_AUTH_MECH_SCRAM_SHA256:
        md = EVP_sha256();
        break;
    case SASL_AUTH_MECH_SCRAM_SHA512:
        md = EVP_sha512();
        break;
    default:
        return SASL_BADPARAM;
    }

    if (HMAC(md, key, keylen, data, datalen, digest, digestlen) == NULL) {
        return SASL_FAIL;
    }

    return SASL_OK;
}
package com.retheviper.springbootsample.api.v1.controller.board; import com.retheviper.springbootsample.api.v1.form.board.CommentForm; import com.retheviper.springbootsample.api.v1.viewmodel.board.CommentViewModel; import com.retheviper.springbootsample.application.dto.board.ArticleDto; import com.retheviper.springbootsample.application.dto.board.CommentDto; import com.retheviper.springbootsample.application.service.board.CommentService; import lombok.RequiredArgsConstructor; import org.modelmapper.ModelMapper; import org.springframework.http.HttpStatus; import org.springframework.security.access.prepost.PostAuthorize; import org.springframework.validation.annotation.Validated; import org.springframework.web.bind.annotation.*; import java.util.List; import java.util.stream.Collectors; /** * Article comment controller class. * * @author retheviper */ @RestController @RequiredArgsConstructor @RequestMapping("api/v1/web/boards/{boardId}/articles/{articleId}/comments") public class CommentApiController { /** * Data model converter */ private final ModelMapper mapper; /** * Board article's comment service */ private final CommentService service; /** * Get list of comments. * * @return list of comments */ @GetMapping @ResponseStatus(HttpStatus.OK) public List<CommentViewModel> listComment(@PathVariable final long articleId) { return this.service.listComment(articleId).stream().map(this::createViewModel) .collect(Collectors.toList()); } /** * Get single comment. * * @param commentId comment ID * @return requested single comment */ @GetMapping("/{commentId}") @ResponseStatus(HttpStatus.OK) public CommentViewModel getComment(@PathVariable final long commentId) { return createViewModel(this.service.getComment(commentId)); } /** * Create single comment. 
* * @param form request form * @return created single comment */ @PostMapping @ResponseStatus(HttpStatus.CREATED) public CommentViewModel createComment(@PathVariable final long articleId, @Validated @RequestBody final CommentForm form) { final CommentDto dto = this.mapper.map(form, CommentDto.class); final ArticleDto article = new ArticleDto(); article.setId(articleId); dto.setArticle(article); return createViewModel(this.service.createComment(dto)); } /** * Update single comment. * * @param commentId comment ID * @param form request form * @return updated single comment */ @PutMapping("/{commentId}") @ResponseStatus(HttpStatus.OK) @PostAuthorize("returnObject.createdBy == authentication.principal.username") public CommentViewModel updateComment(@PathVariable final long articleId, @PathVariable final long commentId, @Validated @RequestBody final CommentForm form) { final CommentDto dto = this.mapper.map(form, CommentDto.class); dto.setId(commentId); final ArticleDto article = new ArticleDto(); article.setId(articleId); dto.setArticle(article); return createViewModel(this.service.updateComment(dto)); } /** * Delete existing single comment. * * @param commentId comment ID */ @DeleteMapping("/{commentId}") @ResponseStatus(HttpStatus.OK) public void deleteComment(@PathVariable final long commentId) { this.service.deleteComment(commentId); } /** * Create view model. * * @param dto target DTO * @return generated view model */ private CommentViewModel createViewModel(final CommentDto dto) { return this.mapper.map(dto, CommentViewModel.class); } }
def split_episodes(meas_file):
    """Split a CSV measurements file into per-episode position traces.

    The file's header row names the columns; an episode boundary is detected
    whenever both start_point and end_point change, or the repetition ('rep')
    column changes.

    Returns a tuple ``(episode_positions_matrix, travelled_distances)`` where
    the first element is a list of [x, y] position lists (one per completed
    episode) and the second is the accumulated travel distance per episode.

    NOTE(review): positions/distance accumulated after the last detected
    boundary are never appended, so the final episode appears to be dropped —
    confirm whether that is intended by the callers.
    """
    # Read only the header line to learn the column order.
    f = open(meas_file, "rU")  # NOTE(review): "rU" is deprecated in Python 3
    header_details = f.readline()
    header_details = header_details.strip().split(',')
    f.close()
    print (header_details)
    # Load the numeric body of the file, skipping the header row.
    details_matrix = np.loadtxt(open(meas_file, "rb"), delimiter=",", skiprows=1)
    print (details_matrix)
    # Seed "previous" state from the first data row.
    previous_pos = [details_matrix[0, header_details.index('pos_x')],
                    details_matrix[0, header_details.index('pos_y')]]
    episode_positions_matrix = []  # one positions list per completed episode
    positions_vector = []          # positions of the episode being built
    travelled_distances = []       # total distance per completed episode
    travel_this_episode = 0
    previous_start_point = details_matrix[0, header_details.index('start_point')]
    previous_end_point = details_matrix[0, header_details.index('end_point')]
    previous_repetition = details_matrix[0, header_details.index('rep')]
    for i in range(1, len(details_matrix)):
        point = [details_matrix[i, header_details.index('pos_x')],
                 details_matrix[i, header_details.index('pos_y')]]
        start_point = details_matrix[i, header_details.index('start_point')]
        end_point = details_matrix[i, header_details.index('end_point')]
        repetition = details_matrix[i, header_details.index('rep')]
        positions_vector.append(point)
        # Episode boundary: both endpoints changed, or the repetition counter
        # changed. Close out the episode accumulated so far.
        if (previous_start_point != start_point and end_point != previous_end_point) or \
                repetition != previous_repetition:
            travelled_distances.append(travel_this_episode)
            travel_this_episode = 0
            # The current point belongs to the NEW episode, so remove it from
            # the episode being closed before storing.
            positions_vector.pop()
            episode_positions_matrix.append(positions_vector)
            positions_vector = []
        # Distance is accumulated against the new episode after a boundary.
        travel_this_episode += sldist(point, previous_pos)
        previous_pos = point
        previous_start_point = start_point
        previous_end_point = end_point
        previous_repetition = repetition
    return episode_positions_matrix, travelled_distances
#include "mpi.h"
#include <iostream>
#include <cstdlib>   // rand (was missing)
#include <cmath>     // pow (was missing)
#include <cstdio>    // fflush(stdout) (was missing)

// Prints the first n elements of arr separated by spaces.
void printArray(int* arr, int n) {
    for (int i = 0; i < n; i++) {
        std::cout << arr[i] << " ";
    }
    std::cout << std::endl;
}

// Fills arr with n pseudo-random values in [0, 9].
void initArray(int* arr, int n) {
    for (int i = 0; i < n; i++) {
        arr[i] = rand() % 10;
    }
}

// Rank 0 reads an array size (a multiple of 100, at least 100 per process),
// scatters 100-element chunks (the last rank also receives the residue),
// each rank computes the sample variance of its chunk, and the minimum
// variance across ranks is reduced to rank 0.
int main(int argc, char** argv) {
    int procRank, procNum, residue, n;
    // sizes[0] = regular chunk length, sizes[1] = last rank's chunk length.
    int sizes[2];
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &procNum);
    MPI_Comm_rank(MPI_COMM_WORLD, &procRank);
    int* mainArr = NULL;
    int* sendcounts = NULL;
    int* displs = NULL;
    double result;
    if (procRank == 0) {
        std::cout << "\n\nEnter the size of the array (divisible by 100 and more than " << procNum*100 << "):";
        fflush(stdout);
        std::cin >> n;
        fflush(stdout);
        if (n % 100 != 0) {
            std::cout << "Error! Array size is not a divisible by 100." << std::endl;
            fflush(stdout);
            MPI_Abort(MPI_COMM_WORLD, MPI_ERR_OTHER);
        }
        if (n < (procNum * 100)) {
            std::cout << "Error! Array size is less than " << procNum*100 << std::endl;
            fflush(stdout);
            MPI_Abort(MPI_COMM_WORLD, MPI_ERR_OTHER);
        }
        mainArr = new int[n];
        initArray(mainArr, n);
        residue = n - (procNum * 100);
        sendcounts = new int[procNum];
        displs = new int[procNum];
        for (int i = 0; i < procNum; i++) {
            sendcounts[i] = 100;
            sizes[0] = sendcounts[i];
            if (i == 0)
                displs[i] = 0;
            if (i != 0)
                displs[i] = displs[i - 1] + sendcounts[i - 1];
            // The last rank absorbs whatever does not divide evenly.
            if ((residue != 0) && (i == procNum - 1))
                sendcounts[i] += residue;
            sizes[1] = sendcounts[i];
        }
    }
    // Every rank needs both chunk lengths to size its receive buffer.
    MPI_Bcast(sizes, 2, MPI_INT, 0, MPI_COMM_WORLD);
    int myBlockSize;
    myBlockSize = (procRank == (procNum - 1)) ? sizes[1] : sizes[0];
    int* res = new int[myBlockSize];
    MPI_Scatterv(mainArr, sendcounts, displs, MPI_INT,
                 res, myBlockSize, MPI_INT, 0, MPI_COMM_WORLD);
    std::cout << "\n\nArray:" << std::endl;
    printArray(res, myBlockSize);
    double MatOj = 0;  // mean (expected value) of the local chunk
    double Disp = 0;   // sample variance of the local chunk
    for (int i = 0; i < myBlockSize; i++)
        MatOj = res[i] + MatOj;
    MatOj = MatOj / myBlockSize;
    for (int i = 0; i < myBlockSize; i++)
        Disp = pow((res[i] - MatOj), 2) + Disp;
    Disp = Disp / (myBlockSize - 1);  // unbiased (n-1) estimator
    std::cout << "Local dispersion:" << Disp << std::endl;
    // Reduce the MINIMUM local variance to rank 0.
    MPI_Reduce(&Disp, &result, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
    if (procRank == 0) {
        std::cout << "Result:" << result << std::endl;
        fflush(stdout);
    }
    // Release heap buffers (were leaked in the original); delete[] on NULL
    // is a no-op, so non-root ranks are safe.
    delete[] res;
    delete[] mainArr;
    delete[] sendcounts;
    delete[] displs;
    MPI_Finalize();
    return 0;
}
module Foreign.LibGit.Status (
  GitStatusList(..),
  GitDiffFile(..),
  GitDiffDelta(..),
  GitStatusEntry(..),
  c_git_status_list_new_integr,
  c_git_status_byindex,
  c_git_status_list_entrycount,
  c_git_status_list_free
) where

import System.IO (IO)
import Text.Show (Show)
import Foreign
import Foreign.C.Types
import Foreign.C.String (CString)
import Foreign.CStorable
import Foreign.CVector
import Foreign.CStorableWrap
import Foreign.LibGit.Models
import GHC.Generics (Generic)

-- | Status Foreign DTOs

-- | Opaque handle for a libgit2 @git_status_list@; only ever used behind a
-- 'Ptr', so it carries no fields of its own.
data GitStatusList = GitStatusList
  deriving stock (Generic)
  deriving anyclass (CStorable)

-- | Mirrors libgit2's @git_diff_file@: describes one side of a diff delta.
data GitDiffFile = GitDiffFile {
  id :: StorableWrap (CVector 20 CUChar),  -- ^ 20-byte object id (OID)
  path :: CString,                         -- ^ file path within the worktree
  size :: CULong,
  flags :: CUInt,
  mode :: CUShort,
  id_abbrev :: CUShort
} deriving stock (Generic)
  deriving anyclass (CStorable)
  deriving (Storable) via (CStorableWrapper GitDiffFile)

-- | Mirrors libgit2's @git_diff_delta@: a change between two files.
data GitDiffDelta = GitDiffDelta {
  deltaDiffStatus :: !CUInt, -- git_delta_t
  deltaFlags :: !CUInt,
  deltaSimilarity :: !CUShort,
  deltaNfiles :: !CUShort,
  oldFile :: !GitDiffFile,
  newFile :: !GitDiffFile
} deriving stock (Generic)
  deriving anyclass (CStorable)
  deriving (Storable) via (CStorableWrapper GitDiffDelta)

-- | Mirrors libgit2's @git_status_entry@: status flags plus the deltas
-- between HEAD/index and index/workdir (either pointer may be null).
data GitStatusEntry = GitStatusEntry {
  status :: !CUInt,
  headToIndex :: !(Ptr GitDiffDelta),
  indexToWorkDir :: !(Ptr GitDiffDelta)
} deriving stock (Generic, Show)
  deriving anyclass (CStorable)
  deriving (Storable) via (CStorableWrapper GitStatusEntry)

-- | Status functions:

-- int git_status_list_new_integr(git_status_list **out, git_repository *repo)
foreign import ccall "git_integr.h git_status_list_new_integr"
  c_git_status_list_new_integr :: Ptr (Ptr GitStatusList) -> Ptr GitRepo -> IO CInt

-- const git_status_entry * git_status_byindex(git_status_list *statuslist, size_t idx);
foreign import ccall "git2/status.h git_status_byindex"
  c_git_status_byindex :: Ptr GitStatusList -> CSize -> IO (Ptr GitStatusEntry)

-- size_t git_status_list_entrycount(git_status_list *statuslist);
foreign import ccall "git2/status.h git_status_list_entrycount"
  c_git_status_list_entrycount :: Ptr GitStatusList -> IO CSize

-- void git_status_list_free(git_status_list *statuslist);
foreign import ccall "git2/status.h git_status_list_free"
  c_git_status_list_free :: Ptr GitStatusList -> IO ()
// This helper method initializes services, apps, metrics and usages in the apisonator for the tests. func (suite *SingletonFlushTestSuite) initializeApisonatorState() { require.Eventually(suite.T(), func() bool { fmt.Printf("Creating service with service_id: %s, service_token:%s", suite.ServiceID, suite.ServiceToken) serviceErr := suite.backend.Push("service", []interface{}{suite.ServiceID, suite.ServiceToken}) if serviceErr != nil { return false } return true }, 4*time.Second, 500*time.Millisecond, "Error creating the service") for _, app := range suite.AppIDs { require.Eventually(suite.T(), func() bool { err := suite.backend.Push("app", []interface{}{suite.ServiceID, app, suite.PlanID}) if err != nil { return false } return true }, 4*time.Second, 500*time.Millisecond, "Error creating the app") } require.Eventually(suite.T(), func() bool { err := suite.backend.Push("metrics", []interface{}{suite.ServiceID, &suite.Metrics}) if err != nil { return false } return true }, 4*time.Second, 500*time.Millisecond, "Error adding metrics") require.Eventually(suite.T(), func() bool { err := suite.backend.Push("usage_limits", []interface{}{suite.ServiceID, suite.PlanID, &suite.Metrics}) if err != nil { return false } return true }, 4*time.Second, 500*time.Millisecond, "Error adding usage limits") }
import * as React from "react"; import { FiInfo } from "react-icons/fi"; import * as Bootstrap from "react-bootstrap"; export interface IInfoProps { id: string; title: string; detail: string | JSX.Element; } export default class Info extends React.Component<IInfoProps> { render() { return ( <Bootstrap.OverlayTrigger trigger="hover" placement="auto" overlay={ <Bootstrap.Popover id={this.props.id}> <Bootstrap.Popover.Title>{this.props.title}</Bootstrap.Popover.Title> <Bootstrap.Popover.Content>{this.props.detail}</Bootstrap.Popover.Content> </Bootstrap.Popover> } > <FiInfo /> </Bootstrap.OverlayTrigger> ); } }
def load_texture(filename):
    """Load an image file into an OpenGL 2D texture.

    The image is flipped vertically (pygame's convention) and uploaded as
    RGBA with linear min/mag filtering.

    Returns the generated OpenGL texture id.
    """
    surface = pygame.image.load(filename)
    pixel_data = pygame.image.tostring(surface, "RGBA", 1)
    tex_width = surface.get_width()
    tex_height = surface.get_height()

    texture_id = glGenTextures(1)
    glBindTexture(GL_TEXTURE_2D, texture_id)
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, tex_width, tex_height, 0,
                 GL_RGBA, GL_UNSIGNED_BYTE, pixel_data)
    return texture_id
/**
 * Copyright 2012 NetDigital Sweden AB
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.nginious.http.server;

import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;

import com.nginious.http.HttpCookie;
import com.nginious.http.HttpMethod;
import com.nginious.http.HttpRequest;
import com.nginious.http.HttpSession;
import com.nginious.http.upload.FilePart;
import com.nginious.http.upload.UploadTracker;

/**
 * In-memory {@link HttpRequest} stub for unit tests. Headers, cookies,
 * parameters, attributes and the request body are held in plain collections
 * and can be populated directly via the public setter/add methods; no
 * network or session machinery is involved. Session, upload and file-part
 * accessors deliberately return {@code null}.
 */
public class HttpTestRequest implements HttpRequest {

    private HttpMethod method;

    private String version;

    private String path;

    private String queryString;

    private String charEncoding;

    // Cookies keyed by cookie name.
    private HashMap<String, HttpCookie> cookies;

    // Header values keyed by header name; multi-valued headers supported.
    private HashMap<String, List<String>> headers;

    // Parameter values keyed by parameter name; multi-valued supported.
    private HashMap<String, List<String>> params;

    private HashMap<String, Object> attributes;

    // Raw request body; source for both the stream and the reader below.
    private byte[] body;

    private InputStream stream;

    private BufferedReader reader;

    /**
     * Creates an empty test request with no headers, cookies, parameters
     * or attributes set.
     */
    public HttpTestRequest() {
        super();
        this.cookies = new HashMap<String, HttpCookie>();
        this.headers = new HashMap<String, List<String>>();
        this.params = new HashMap<String, List<String>>();
        this.attributes = new HashMap<String, Object>();
    }

    /**
     * Returns the first value set for the given header, or {@code null}
     * if the header has not been added.
     */
    public String getHeader(String name) {
        List<String> values = headers.get(name);
        return values != null ? values.get(0) : null;
    }

    public String[] getHeaderNames() {
        return headers.keySet().toArray(new String[headers.size()]);
    }

    /**
     * Adds a value for the given header; repeated calls accumulate values.
     */
    public void addHeader(String name, String value) {
        List<String> values = null;

        if(!headers.containsKey(name)) {
            values = new ArrayList<String>();
            headers.put(name, values);
        } else {
            values = headers.get(name);
        }

        values.add(value);
    }

    public HttpCookie getCookie(String name) {
        return cookies.get(name);
    }

    public HttpCookie[] getCookies() {
        return cookies.values().toArray(new HttpCookie[cookies.size()]);
    }

    /** Adds a cookie, replacing any existing cookie with the same name. */
    public void addCookie(HttpCookie cookie) {
        cookies.put(cookie.getName(), cookie);
    }

    // Upload tracking is not supported by this stub.
    public void prepareUploadTracker() {
        return;
    }

    public UploadTracker getUploadTracker() {
        return null;
    }

    public HttpMethod getMethod() {
        return this.method;
    }

    public void setMethod(HttpMethod method) {
        this.method = method;
    }

    public String getVersion() {
        return this.version;
    }

    public void setVersion(String version) {
        this.version = version;
    }

    public String getPath() {
        return this.path;
    }

    public void setPath(String path) {
        this.path = path;
    }

    public String getQueryString() {
        return this.queryString;
    }

    // Sessions are not supported by this stub; both variants return null.
    public HttpSession getSession() {
        return null;
    }

    public HttpSession getSession(boolean create) {
        return null;
    }

    public void setQueryString(String queryString) {
        this.queryString = queryString;
    }

    public String getCharacterEncoding() {
        return this.charEncoding;
    }

    public void setCharacterEncoding(String charEncoding) {
        this.charEncoding = charEncoding;
    }

    /** Returns the body length in bytes, or 0 when no body has been set. */
    public int getContentLength() {
        return this.body == null ? 0 : body.length;
    }

    public String getContentType() {
        return getHeader("Content-Type");
    }

    /**
     * Returns a byte stream over the body. Mutually exclusive with
     * {@link #getReader()}: once a reader has been created this throws.
     *
     * @throws IOException if a reader was already created for this request
     */
    public InputStream getInputStream() throws IOException {
        if(this.reader != null) {
            throw new IOException("Reader created");
        }

        if(this.stream == null) {
            byte[] content = this.body == null ? new byte[0] : this.body;
            this.stream = new ByteArrayInputStream(content);
        }

        return this.stream;
    }

    /**
     * Returns a character reader over the body. Mutually exclusive with
     * {@link #getInputStream()}: once a stream has been created this throws.
     *
     * @throws IOException if a stream was already created for this request
     */
    public BufferedReader getReader() throws IOException {
        if(this.stream != null) {
            throw new IOException("Stream created");
        }

        if(this.reader == null) {
            byte[] content = this.body == null ? new byte[0] : this.body;
            this.reader = new BufferedReader(new InputStreamReader(new ByteArrayInputStream(content)));
        }

        return this.reader;
    }

    /**
     * Sets the request body and discards any previously created stream or
     * reader so the new content can be read from scratch.
     */
    public void setContent(byte[] body) {
        this.body = body;
        this.stream = null;
        this.reader = null;
    }

    public Locale getLocale() {
        return Locale.getDefault();
    }

    /**
     * Returns the first value set for the given parameter, or {@code null}.
     */
    public String getParameter(String name) {
        List<String> list = params.get(name);
        return list != null ? list.get(0) : null;
    }

    public String[] getParameterValues(String name) {
        List<String> list = params.get(name);
        return list != null ? list.toArray(new String[list.size()]) : null;
    }

    public String[] getParameterNames() {
        return params.keySet().toArray(new String[params.size()]);
    }

    public void setAttribute(String name, Object value) {
        attributes.put(name, value);
    }

    public Object getAttribute(String name) {
        return attributes.get(name);
    }

    public Object removeAttribute(String name) {
        return attributes.remove(name);
    }

    // File uploads are not supported by this stub.
    public FilePart getFile(String name) {
        return null;
    }

    public Collection<FilePart> getFiles() {
        return null;
    }

    /** Replaces all existing values for the parameter with a single value. */
    public void setParameter(String name, String value) {
        params.remove(name);
        addParameter(name, value);
    }

    /**
     * Adds a value for the given parameter; repeated calls accumulate values.
     */
    public void addParameter(String name, String value) {
        List<String> values = null;

        if(!params.containsKey(name)) {
            values = new ArrayList<String>();
            params.put(name, values);
        } else {
            values = params.get(name);
        }

        values.add(value);
    }

    public String getProtocol() {
        return "http";
    }

    public String getScheme() {
        return "http";
    }

    // NOTE(review): the address/host values look swapped (the remote
    // *address* is usually the IP and the *host* the name) — confirm intended.
    public String getRemoteAddress() {
        return "localhost";
    }

    public String getRemoteHost() {
        return "127.0.0.1";
    }

    public int getRemotePort() {
        return 80;
    }

    // Dispatching is a no-op in this stub.
    public void dispatch(String path) {
        return;
    }
}
def check_vulnerabilities(banner, filename):
    """Print a warning for each vulnerable-version string found in ``banner``.

    ``filename`` names a text file with one known-vulnerable version string
    per line; any line whose text (newline stripped) occurs as a substring of
    ``banner`` triggers a "[+] Server is vulnerable" message.

    Fixes: the original used a Python-2 print statement (the rest of this
    codebase is Python 3) and read the whole file via readlines(); the file
    is now iterated lazily line by line.
    """
    with open(filename, 'r') as f:
        for line in f:
            if line.strip('\n') in banner:
                print("[+] Server is vulnerable: {}".format(banner.strip('\n')))
/**
 * Test class for local ZigBee API.
 *
 * Exercises a real TI CC2531 dongle over a serial port, which is why the
 * single test is marked {@code @Ignore}: it can only run on a machine with
 * the hardware attached to COM5.
 */
public class ZigBeeApiCc2531ImplTest {
    /**
     * The LOGGER.
     */
    private final static org.slf4j.Logger LOGGER = LoggerFactory.getLogger(ZigBeeApiCc2531ImplTest.class);

    /**
     * Tests local ZigBee API: opens the dongle on COM5 (PAN id 4951,
     * channel 11), logs device add/update/remove events for 10 seconds,
     * dumps the discovered devices and shuts everything down.
     */
    @Test
    @Ignore
    public void testZigBeeApiLocal() {
        final SerialPort port = new SerialPortImpl("COM5");
        // 4951 = PAN id, 11 = radio channel; no network key, no reset.
        final ZigBeeDongle dongle = new ZigBeeDongleTiCc2531Impl(port, 4951, 11, null, false);
        final ZigBeeApiDongleImpl api = new ZigBeeApiDongleImpl(dongle, false);

        // Log every network-state change while the test is running.
        api.getNetworkState().addNetworkListener(new ZigBeeNetworkStateListener() {
            @Override
            public void deviceAdded(ZigBeeDevice device) {
                LOGGER.info("Device added: " + device);
            }

            @Override
            public void deviceUpdated(ZigBeeDevice device) {
                LOGGER.info("Device updated: " + device);
            }

            @Override
            public void deviceRemoved(ZigBeeDevice device) {
                LOGGER.info("Device removed: " + device);
            }
        });

        api.startup();

        // Give the network 10 seconds to discover devices.
        try {
            Thread.sleep(10000);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }

        final List<ZigBeeDevice> devices = api.getNetworkState().getDevices();
        for (final ZigBeeDevice device : devices) {
            LOGGER.info(device.toString());
        }

        api.shutdown();
        port.close();
    }
}
// Close closes the stream.
// Calling methods on a closed stream may panic.
func (r *RecordStream) Close() {
	// Ask the server to delete the stream; the reply is discarded (nil).
	r.c.c.Request(&proto.DeleteRecordStream{StreamIndex: r.index}, nil)
	// Mark the stream stopped and drop the client reference so later use
	// fails fast rather than touching a torn-down stream.
	r.running = false
	r.c = nil
}
package global

import (
	"github.com/TrashPony/Veliri/src/mechanics/factories/boxes"
	"github.com/TrashPony/Veliri/src/mechanics/gameObjects/boxInMap"
	"github.com/TrashPony/Veliri/src/mechanics/gameObjects/player"
	"github.com/TrashPony/Veliri/src/mechanics/globalGame"
	"github.com/TrashPony/Veliri/src/mechanics/globalGame/box"
	"github.com/TrashPony/Veliri/src/mechanics/globalGame/game_math"
)

// placeNewBox places a new box on the map from the given inventory slot and
// notifies the client; on failure only an error event is sent.
func placeNewBox(user *player.Player, msg Message) {
	// boxes can only be placed by the MP (translated from Russian)
	err, newBox := box.PlaceNewBox(user, msg.Slot, msg.BoxPassword)
	if err != nil {
		go SendMessage(Message{Event: "Error", Error: err.Error(), IDUserSend: user.GetID(), IDMap: user.GetSquad().MatherShip.MapID})
	} else {
		go SendMessage(Message{Event: "UpdateInventory", IDUserSend: user.GetID(), IDMap: user.GetSquad().MatherShip.MapID})
		go SendMessage(Message{Event: "NewBox", Box: newBox, X: user.GetSquad().MatherShip.X, Y: user.GetSquad().MatherShip.Y,
			IDMap: user.GetSquad().MatherShip.MapID})
	}
}

// openBox sends the box's contents to the requesting player, provided the
// player is within 75 units and, for protected boxes, supplied the right
// password (or the box's password is the zero value).
func openBox(user *player.Player, msg Message) {
	mapBox, mx := boxes.Boxes.Get(msg.BoxID)
	// NOTE(review): the lock returned by Get is released immediately, before
	// mapBox is read below — confirm this is safe.
	mx.Unlock()
	if mapBox != nil {

		dist := game_math.GetBetweenDist(user.GetSquad().MatherShip.X, user.GetSquad().MatherShip.Y, mapBox.X, mapBox.Y)
		if dist < 75 {

			// TODO: check the password not only when opening the box but also on
			// every action; the password should be stored as a session for the
			// last opened box (translated from Russian)
			if mapBox.Protect {
				if mapBox.GetPassword() == msg.BoxPassword || mapBox.GetPassword() == 0 {
					go SendMessage(Message{Event: msg.Event, BoxID: mapBox.ID, Inventory: mapBox.GetStorage(), X: mapBox.X, Y: mapBox.Y,
						Size: mapBox.CapacitySize, IDUserSend: user.GetID(), IDMap: user.GetSquad().MatherShip.MapID, NeedCheckView: true})
				} else {
					if msg.BoxPassword == 0 {
						// No password supplied for a protected box: prompt for one.
						go SendMessage(Message{Event: msg.Event, BoxID: mapBox.ID, Error: "need password", X: mapBox.X, Y: mapBox.Y,
							IDUserSend: user.GetID(), IDMap: user.GetSquad().MatherShip.MapID, NeedCheckView: true})
					} else {
						go SendMessage(Message{Event: "Error", BoxID: mapBox.ID, Error: "wrong password",
							IDUserSend: user.GetID(), IDMap: user.GetSquad().MatherShip.MapID})
					}
				}
			} else {
				// Unprotected box: send contents directly.
				go SendMessage(Message{Event: msg.Event, BoxID: mapBox.ID, Inventory: mapBox.GetStorage(), X: mapBox.X, Y: mapBox.Y,
					Size: mapBox.CapacitySize, IDUserSend: user.GetID(), IDMap: user.GetSquad().MatherShip.MapID, NeedCheckView: true})
			}
		}
	}
}

// useBox moves items between the player's inventory and a box, handling both
// single-slot events and the multi-slot (Slots) variants, then refreshes the
// client inventory. Only the last error of a multi-slot operation is reported.
func useBox(user *player.Player, msg Message) {
	var err error
	var mapBox *boxInMap.Box

	if msg.Event == "getItemFromBox" {
		err, mapBox = box.GetItemFromBox(user, msg.BoxID, msg.Slot)
		updateBoxInfo(mapBox)
	}

	if msg.Event == "placeItemToBox" {
		err, mapBox = box.PlaceItemToBox(user, msg.BoxID, msg.Slot)
		updateBoxInfo(mapBox)
	}

	if msg.Event == "getItemsFromBox" {
		for _, i := range msg.Slots {
			err, mapBox = box.GetItemFromBox(user, msg.BoxID, i)
			updateBoxInfo(mapBox)
		}
	}

	if msg.Event == "placeItemsToBox" {
		for _, i := range msg.Slots {
			err, mapBox = box.PlaceItemToBox(user, msg.BoxID, i)
			updateBoxInfo(mapBox)
		}
	}

	if err != nil {
		go SendMessage(Message{Event: "Error", Error: err.Error(), IDUserSend: user.GetID(), IDMap: user.GetSquad().MatherShip.MapID})
	}

	go SendMessage(Message{Event: "UpdateInventory", IDUserSend: user.GetID(), IDMap: user.GetSquad().MatherShip.MapID})
}

// boxToBox transfers items directly between two different boxes (single slot
// or multi-slot events) and pushes updated box contents to nearby players.
func boxToBox(user *player.Player, msg Message) {
	// Transferring a box into itself is a no-op.
	if msg.BoxID == msg.ToBoxID {
		return
	}

	if msg.Event == "boxToBoxItem" {
		err, getBox, toBox := box.BoxToBox(user, msg.BoxID, msg.Slot, msg.ToBoxID)
		if err != nil {
			go SendMessage(Message{Event: "Error", Error: err.Error(), IDUserSend: user.GetID(), IDMap: user.GetSquad().MatherShip.MapID})
		} else {
			updateBoxInfo(getBox)
			updateBoxInfo(toBox)
		}
	}

	if msg.Event == "boxToBoxItems" {
		for _, i := range msg.Slots {
			err, getBox, toBox := box.BoxToBox(user, msg.BoxID, i, msg.ToBoxID)
			if err != nil {
				go SendMessage(Message{Event: "Error", Error: err.Error(), IDUserSend: user.GetID(), IDMap: user.GetSquad().MatherShip.MapID})
			} else {
				updateBoxInfo(getBox)
				updateBoxInfo(toBox)
			}
		}
	}

	go SendMessage(Message{Event: "UpdateInventory", IDUserSend: user.GetID(), IDMap: user.GetSquad().MatherShip.MapID})
}

// updateBoxInfo broadcasts the box's current contents to every connected
// player within 175 units of it.
func updateBoxInfo(box *boxInMap.Box) {
	if box == nil {
		return
	}

	users, rLock := globalGame.Clients.GetAll()
	defer rLock.Unlock()

	for _, user := range users {
		dist := game_math.GetBetweenDist(user.GetSquad().MatherShip.X, user.GetSquad().MatherShip.Y, box.X, box.Y)
		if dist < 175 { // so players that are far away cannot see the box contents (translated from Russian)
			go SendMessage(Message{Event: "UpdateBox", IDUserSend: user.GetID(), BoxID: box.ID, X: box.X, Y: box.Y,
				Inventory: box.GetStorage(), Size: box.CapacitySize, IDMap: user.GetSquad().MatherShip.MapID, NeedCheckView: true})
		}
	}
}
/**
 * Tests that editing an inventory item with a product id that does not exist
 * in the business's catalogue is rejected with 400 Bad Request and returns
 * the NoProductExistsException message.
 */
@Test
void editItemInvalidProductId() throws Exception {
    // Persist a valid inventory item to edit.
    final Product existingProduct = productRepository.findById(new ProductId("p1", businessId)).orElseThrow();
    final InventoryItem savedItem = new InventoryItem(existingProduct, 2, 2.00, 1.80, "2021-04-20", null, null, "2023-05-20");
    inventoryItemRepository.save(savedItem);

    // Build an update payload referencing a product that does not exist.
    final JSONObject updateBody = new JSONObject();
    updateBody.put("productId", "NotInProducts");

    final RequestBuilder putRequest = MockMvcRequestBuilders
            .put("/businesses/{businessId}/inventory/{inventoryItemId}", businessId, savedItem.getId())
            .content(updateBody.toString())
            .contentType(MediaType.APPLICATION_JSON)
            .accept(MediaType.APPLICATION_JSON)
            .with(httpBasic("[email protected]", "1337-H%nt3r2"));

    final MvcResult response = this.mockMvc.perform(putRequest)
            .andExpect(MockMvcResultMatchers.status().isBadRequest())
            .andReturn();

    final String returnedExceptionString = response.getResponse().getContentAsString();
    Assertions.assertEquals(new NoProductExistsException("NotInProducts", businessId).getMessage(),
            returnedExceptionString);
}
Note: This story was edited at 14.00hrs, to include some counter arguments to our angry rant. You can read them at the bottom of the story. Seven weeks ago, the Conservative party won Britain’s general election on a manifesto which pledged to rebalance Britain’s economy and turn the cities of England’s M62 corridor into a “Northern Powerhouse”. A lot of that policy has turned out to be PR fluff, of the standing-about-on-building-sites-in-high-viz-jackets variety. But at the core of it were two big and concrete ideas. One was devolving power; the other was a major investment in the region’s transport links, to make it possible to live in, say, Bradford, and commute to work in Manchester. Anyway. Yesterday, this happened: The government says it will delay or cut back a number of modernisation projects planned for Network Rail. Transport Secretary Patrick McLoughlin says rising costs and missed targets make the £38.5bn five-year plan untenable. He blamed Network Rail, saying it should have foreseen the improvements would cost more and take longer. (That’s from the BBC.) There were three major rail upgrades included in this plan. One is the electrification of the core section of the Transpennine route, between Manchester and Leeds. That’s been shelved. Another is the electrification of the Midland mainline, between Bedford, Nottingham and Sheffield. (The section south of Bedford, which serves the London commuter market, was electrified years ago). That’s been shelved too. The third is electrification of the Great Western line, from London towards Oxford, Bristol and Cardiff. Has that been shelved too? Here’s McLoughlin again: Electrification of the Great Western line is a top priority and I want Network Rail to concentrate its efforts on getting that right. It’s not that we’re cynical about the government’s commitment to the Northern Powerhouse, exactly. 
But, well: While we’re not being cynical, check out this map from Network Rail’s latest annual report, showing its major infrastructure projects in 2014-15. One project in Scotland, one in the north, two in the Midlands... and four in London. I live in London. I work in London. I have been a Londoner all my life. But come on. The government has framed these cuts as an emergency response to problems at Network Rail: yesterday, it also announced that the body’s current chair Richard Parry-Jones was stepping down, to be replaced by London’s transport commissioner, Sir Peter Hendy. The implication is that ministers would just love to continue with the investment plan they announced last year, but simply can’t afford it. But there are two problems with this. One is that Network Rail has said it knew early last year that the five-year plan would be “incredibly difficult to deliver”. The Conservative party must have known that, even as it made all those manifesto commitments to investing in the north. The other, bigger problem is that it is very, very obvious that schemes affecting the north have been axed, while those restricted to the south haven’t. From an economic modelling perspective this probably makes sense: as we’ve explained before, projects affecting a big and rich city like London will always do better in a cost:benefit analysis than those that don’t. But it nonetheless means that rich southern commuters are getting investment, while less rich northern ones are stuck with the same crappy service they’ve had for decades. If the government or its civil service are really serious about rebalancing Britain’s economy, and creating a northern powerhouse, this is a funny way of going about it. EDIT: 14.00hrs Pains us though it does to say it, we might have been a little unfair here. 
On our Facebook page (where you should definitely all like us, by the way), Nick Kingsley, the managing editor of Railway Gazette International, and a northerner himself, has got in touch to point out the other side of the story. You can read his full comments on Facebook, but here, reposted with Nick’s permission, are the key points: 1) You don't cancel projects that have already been started. “...the main reason the Great Western project is proceeding is because it’s well in hand, not because it is in the south. Neither the [Midland Main Line] nor [Transpennine] projects have yet seen serious work begin in earnest.” 2) Electrification isn't all it’s cracked up to be anyway. “...it mainly delivers benefit to the rail asset manager itself, ie. Network Rail, by reducing track wear and maintenance cost. “Yes, journeys get a bit quieter and a bit more pleasant, possibly through new rolling stock (though not always). But electrification alone does not deliver a step change in capacity or connectivity.” 3) It's better to take a bit longer but do things right. “On the Manchester – Leeds corridor, there was serious concern that electrification on the existing alignment would worsen a capacity squeeze that sees the limited-stop inter-city service taking capacity away from trains serving local stations. “As the Transport Secretary’s statement clearly implied, it might be better to come up with a much more robust rail investment programme which encompasses the HS3 idea [an entirely new Transpennine line] before we embark on a tricky, potentially flawed "patch and mend" job on just one of the five trans-Pennine rail arteries.” Valid points all. So perhaps, in the short term, this is the correct decision. It remains striking, nonetheless, how many of the projects that were already underway, and therefore untouchable – the Great Western, Thameslink, Crossrail – are the ones affecting London. HS3, by contrast, is little more than a whizzy brand name at this point. 
Rather like the Northern Powerhouse itself. Jonn Elledge is the editor of CityMetric. He is on Twitter, far too much, as @jonnelledge.
#![no_std]

pub mod config;
pub mod measurement;

/// Messages Overview
/// Description                 Default  Length  Remark
///                             CAN-ID   DLC
/// IVT_Msg_Command             0x411    8       Function commands, SET and GET commands. A command-ID-byte is included for identification
/// IVT_Msg_Debug               0x510    8       Message only for internal use
/// IVT_Msg_Response            0x511    8       Response to SET and GET command messages. A response-ID-byte is included for identification
/// IVT_Msg_Result_I            0x521    6       Current
/// IVT_Msg_Result_U1           0x522    6       Voltage 1
/// IVT_Msg_Result_U2           0x523    6       Voltage 2
/// IVT_Msg_Result_U3           0x524    6       Voltage 3
/// IVT_Msg_Result_T            0x525    6       Temperature
/// IVT_Msg_Result_W            0x526    6       Power (referring to current and voltage U1)
/// IVT_Msg_Result_As           0x527    6       Current counter
/// IVT_Msg_Result_Wh           0x528    6       Energy counter (referring to current and voltage U1)
///
/// * Not used bytes in response messages are undefined and reported as 0x00.
/// * Not used / undefined bytes in command messages must be set to 0x00.
/// * Each defined command will report its response message even if there was no change done or is
///   currently not allowed (e.g. set configuration during run mode). This is done to give an
///   acknowledgement to the sender.
/// * Consecutive commands must be sent not faster than 2ms, or you can wait until the related
///   response is sent.
/// * Response messages must be available on the bus (free bus) at least +500ms after the related
///   command, if not otherwise specified.
/// * If not otherwise mentioned byte orders are Big Endian.
///
/// Multiplexable Messages
/// All messages sent by the IVT shall be uniquely identifiable by the first data byte sent as mux byte.
/// DB0 (Muxbyte)  Remark
/// 0x0n           Results (measured or calculated)
/// 0x1n           Set CAN ID
/// 0x2n           Set config result
/// 0x3n           Set commands
/// 0x4n           Get error/log data
/// 0x5n           Get CAN ID
/// 0x6n           Get config result
/// 0x7n           Get commands
/// 0x8n           Response on error/log data
/// 0x9n           Responses on CAN ID
/// 0xAn           Responses on Config Result
/// 0xBn           Responses on Set and Get Commands
/// 0xFF           Response on not allowed message
///
/// Capabilities:
/// Decodes data on configured IDs.
/// Decodes basic error flags on incoming data.
/// Sends query for sensor serial #s.
#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy, PartialOrd, Ord)]
pub enum Endianness {
    /// Little Endian
    Little,
    /// Big Endian
    Big,
}

#[cfg(test)]
mod tests {
    #[test]
    fn it_works() {
        assert_eq!(2 + 2, 4);
    }
}
import glob
import os

import click
import pandas as pd

import scphylo as scp
from scphylo.ul._servers import cmd, write_cmds_get_main


@click.command(short_help="Run mpileup.")
@click.argument(
    "outdir",
    required=True,
    type=click.Path(
        exists=True, file_okay=False, dir_okay=True, readable=True, resolve_path=True
    ),
)
@click.argument(
    "ref",
    required=True,
    type=click.Choice(scp.settings.refs),
)
@click.option(
    "--time",
    default="5-00:00:00",
    type=str,
    show_default=True,
    help="Time.",
)
@click.option(
    "--mem",
    default="150",
    type=str,
    show_default=True,
    help="Memory.",
)
@click.option(
    "--afterok",
    default=None,
    type=str,
    show_default=True,
    help="Afterok.",
)
def mpileup(outdir, ref, time, mem, afterok):
    """Run mpileup.

    scphylo mpileup /path/to/in/dir hg19|hg38|mm10

    BAM files (*.markdup_bqsr.bam) --> VCF file (_pileupcalls.vcf)
    """
    # Map each supported reference genome to its scphylo settings bundle.
    # The old if/elif chain left `config` unbound for an unrecognized ref;
    # fail loudly instead (click.Choice should already prevent this).
    ref_configs = {
        "hg19": scp.settings.hg19,
        "mm10": scp.settings.mm10,
        "hg38": scp.settings.hg38,
        "m10x": scp.settings.m10x,
    }
    if ref not in ref_configs:
        raise ValueError(f"Unsupported reference genome: {ref!r}")
    config = ref_configs[ref]

    def cmds(samples):
        """Build the SLURM command string that pileups all sample BAMs at once."""
        cmds = ""
        cmds += cmd([f"module load {scp.settings.tools['bcftools']}"])
        files = " ".join([f"{outdir}/{s}.markdup_bqsr.bam" for s in samples])
        cmds += cmd(
            [
                "bcftools mpileup",
                "--min-BQ 30",
                "--min-MQ 0",
                "--count-orphans",
                "--ignore-overlaps",
                "--output-type u",
                "--max-depth 5000",
                '--annotate "DP,AD"',
                "--threads $SLURM_CPUS_PER_TASK",
                f"-f {config['ref']}",
                files,
                "|",
                "bcftools call",
                "--output-type v",
                # "---multiallelic-caller",
                "--consensus-caller",
                "--variants-only",
                f"-o {outdir}/_pileupcalls.vcf",
            ]
        )
        cmds += cmd(["echo Done!"], islast=True)
        return cmds

    # Derive sample names from the BAM filenames in the output directory.
    samples = [
        os.path.basename(path).replace(".markdup_bqsr.bam", "")
        for path in glob.glob(f"{outdir}/*.markdup_bqsr.bam")
    ]

    df_cmds = pd.DataFrame()
    df_cmds["cmd"] = [cmds(samples)]
    cmdmain = write_cmds_get_main(
        df_cmds,
        "mpileup",
        time,
        mem,
        None,
        16,
        scp.settings.tools["email"],
        f"{outdir}/_tmp",
        afterok,
    )
    os.system(cmdmain)
/*--------------------------------------------------------------------------------------------- * Copyright (c) Bentley Systems, Incorporated. All rights reserved. * See LICENSE.md in the project root for license terms and full copyright notice. *--------------------------------------------------------------------------------------------*/ import { SvgFlag, SvgFolderOpened, SvgHome, SvgPlaceholder, SvgSettings, } from '@itwin/itwinui-icons-react'; import { useState } from '@storybook/addons'; import { Story, Meta } from '@storybook/react'; import React from 'react'; import { SideNavigation, SidenavButton, SideNavigationProps, } from '../../src/core'; export default { component: SideNavigation, subcomponents: { SidenavButton }, argTypes: { className: { control: { disable: true } }, style: { control: { disable: true } }, id: { control: { disable: true } }, items: { control: { disable: true } }, secondaryItems: { control: { disable: true } }, }, args: { style: { height: 'calc(100vh - 24px) ' } }, title: 'Core/SideNavigation', } as Meta<SideNavigationProps>; export const Basic: Story<SideNavigationProps> = (args) => { return ( <SideNavigation {...args} items={[ <SidenavButton startIcon={<SvgHome />} key={0}> Home </SidenavButton>, <SidenavButton startIcon={<SvgFlag />} key={1}> Issues </SidenavButton>, <SidenavButton startIcon={<SvgFolderOpened />} key={2} disabled> Documents </SidenavButton>, ]} secondaryItems={[ <SidenavButton startIcon={<SvgSettings />} key={3}> Settings </SidenavButton>, ]} /> ); }; export const ActiveItem: Story<SideNavigationProps> = (args) => { const [activeIndex, setActiveIndex] = useState<number>(0); const mainItems = [...Array(3).fill(null)].map((_, index) => ( <SidenavButton startIcon={<SvgPlaceholder />} key={index} isActive={activeIndex === index} onClick={() => setActiveIndex(index)} > {`App ${index}`} </SidenavButton> )); return ( <SideNavigation {...args} items={mainItems} secondaryItems={[ <SidenavButton startIcon={<SvgSettings />} 
key={3}> Settings </SidenavButton>, ]} /> ); };
/**
 * Non-API analysis data container; used to pass information about a single
 * analysis (detail) back and forth between the service and data factory
 */
public class AnalysisDetailWithUser extends AnalysisDetailImpl {

  // ID of the user who owns this analysis; excluded from JSON serialization (see getter)
  private long _userId;

  /**
   * Builds a detail instance from a database row via the data factory.
   *
   * @param rs result set positioned at the row to read
   * @throws SQLException if a database access error occurs
   */
  public AnalysisDetailWithUser(ResultSet rs) throws SQLException {
    UserDataFactory.populateDetailFields(this, rs);
  }

  /**
   * Builds a brand new analysis owned by the given user from an incoming
   * creation request (fresh ID and timestamps are generated).
   *
   * @param ownerId ID of the user who will own the new analysis
   * @param request client-supplied creation request
   */
  public AnalysisDetailWithUser(long ownerId, AnalysisListPostRequest request) {
    setInitializationFields(ownerId);
    setBaseFields(request);
    setDescriptor(request.getDescriptor());
    setNotes(request.getNotes());
    setIsPublic(request.getIsPublic());
  }

  /**
   * Builds a copy of an existing analysis for a new owner, recording
   * provenance of the source. The copy is always created non-public.
   *
   * @param ownerId ID of the user who will own the copy
   * @param source analysis being copied
   * @param provenanceOwner name/organization of the source analysis' owner
   */
  public AnalysisDetailWithUser(long ownerId, AnalysisDetailWithUser source, AccountDataPair provenanceOwner) {
    setInitializationFields(ownerId);
    setBaseFields(source);
    setDescriptor(source.getDescriptor());
    setNotes(source.getNotes());
    setIsPublic(false);
    setProvenance(createProvenance(source, provenanceOwner));
  }

  /**
   * Captures a snapshot of the source analysis' identity and metadata at
   * import time so the copy can report where it came from.
   */
  private static AnalysisProvenance createProvenance(AnalysisDetailWithUser source, AccountDataPair provenanceOwner) {
    AnalysisProvenance provenance = new AnalysisProvenanceImpl();
    OnImportProvenanceProps importProps = new OnImportProvenancePropsImpl();
    importProps.setAnalysisId(source.getAnalysisId());
    importProps.setAnalysisName(source.getDisplayName());
    importProps.setOwnerId(source.getUserId());
    importProps.setOwnerName(provenanceOwner.getName());
    importProps.setOwnerOrganization(provenanceOwner.getOrganization());
    importProps.setCreationTime(source.getCreationTime());
    importProps.setModificationTime(source.getModificationTime());
    importProps.setIsPublic(source.getIsPublic());
    provenance.setOnImport(importProps);
    return provenance;
  }

  // Assigns owner, a freshly generated analysis ID, and creation/modification
  // timestamps (both set to "now" for a new record).
  private void setInitializationFields(long ownerId) {
    String now = Utils.getCurrentDateTimeString();
    setUserId(ownerId);
    setCreationTime(now);
    setModificationTime(now);
    setAnalysisId(IdGenerator.getNextAnalysisId());
  }

  // Copies the validated base fields from the request/source; enforces
  // non-emptiness and per-field maximum sizes.
  private void setBaseFields(AnalysisBase base) {
    setDisplayName(checkMaxSize(50, "displayName",
        checkNonEmpty("displayName", base.getDisplayName())));
    setDescription(checkMaxSize(4000, "description", base.getDescription()));
    setStudyId(checkMaxSize(50, "studyId", checkNonEmpty("studyId", base.getStudyId())));
    setStudyVersion(checkMaxSize(50, "studyVersion", base.getStudyVersion())); // TODO: will eventually need to be non-empty
    setApiVersion(checkMaxSize(50, "apiVersion", base.getApiVersion())); // TODO: will eventually need to be non-empty
  }

  /**
   * Sets the descriptor and additionally derives the filter/computation/
   * visualization counts from it (overrides the superclass setter behavior;
   * NOTE(review): consider adding an @Override annotation if the superclass
   * declares this method).
   */
  public void setDescriptor(AnalysisDescriptor descriptor) {
    super.setDescriptor(descriptor);
    setNumFilters((long)descriptor.getSubset().getDescriptor().size());
    setNumComputations((long)descriptor.getComputations().size());
    setNumVisualizations((long)Functions.reduce(descriptor.getComputations(),
        (count, next) -> count + next.getVisualizations().size(), 0));
  }

  /** Owner's user ID; not serialized to JSON. */
  @JsonIgnore
  public long getUserId() {
    return _userId;
  }

  public void setUserId(long userId) {
    _userId = userId;
  }

  /** Builds the minimal response object carrying only this analysis' ID. */
  @JsonIgnore
  public AnalysisListPostResponse getIdObject() {
    AnalysisListPostResponse idObj = new AnalysisListPostResponseImpl();
    idObj.setAnalysisId(getAnalysisId());
    return idObj;
  }
}
def loadAudioFile(self, filePath):
    """Load an audio file, reset derived state, and hand the samples to the player.

    Stops any current playback, reads the file (as float32; `sf` is presumably
    the soundfile module — TODO confirm), records its sample rate, length, and
    channel count, loads the samples into the file track, clears cached notes,
    and re-synthesizes the instrument before re-arming the player.

    :param filePath: path of the audio file to load
    """
    self.player.stop()
    fileData, self.sampleRate = sf.read(filePath, dtype='float32')
    self.audioLength = len(fileData)
    # soundfile returns a 1-D array for mono and (frames, channels) for
    # multi-channel audio; the old code probed fileData[0] under a bare
    # `except:` (which also swallowed KeyboardInterrupt/SystemExit).
    self.channels = fileData.shape[1] if fileData.ndim > 1 else 1
    self.fileTrack.loadSamples(fileData)
    self.notes = None
    self.synthesizeInstrument()
    self.player.loadAudioFile()
/**
 * Sends a message to every safely-connected client and records it in the
 * chat history.
 *
 * @param message the message to deliver
 * @param notTo a client to exclude from delivery; can be null to deliver to all
 */
public void send(Message message, @Nullable ServerClient notTo) {
    history.add(message);
    // System messages include the sender's name in the packet; all other
    // messages are sent with an empty sender field. (Both branches were
    // previously duplicated loops differing only in this string.)
    String sender = message.isSystemMessage() ? message.getSender().getName() : "";
    server.getConnectedClients().forEach(c -> {
        if (c.SAFE_CONNECTION && c != notTo) {
            c.sendPacket(new ChatMessagePacket(sender, message.asString()));
        }
    });
}
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) AutoRest Code Generator.

package com.azure.resourcemanager.peering.models;

import com.azure.core.util.ExpandableStringEnum;
import com.fasterxml.jackson.annotation.JsonCreator;
import java.util.Collection;

/**
 * The state of the IPv4 session.
 *
 * <p>NOTE(review): the value names (Idle, Connect, Active, OpenSent,
 * OpenConfirm, Established) appear to mirror the BGP finite-state-machine
 * states, plus Azure-specific Pending* transition values — confirm against
 * the service documentation.
 */
public final class SessionStateV4 extends ExpandableStringEnum<SessionStateV4> {
    /** Static value None for SessionStateV4. */
    public static final SessionStateV4 NONE = fromString("None");

    /** Static value Idle for SessionStateV4. */
    public static final SessionStateV4 IDLE = fromString("Idle");

    /** Static value Connect for SessionStateV4. */
    public static final SessionStateV4 CONNECT = fromString("Connect");

    /** Static value Active for SessionStateV4. */
    public static final SessionStateV4 ACTIVE = fromString("Active");

    /** Static value OpenSent for SessionStateV4. */
    public static final SessionStateV4 OPEN_SENT = fromString("OpenSent");

    /** Static value OpenConfirm for SessionStateV4. */
    public static final SessionStateV4 OPEN_CONFIRM = fromString("OpenConfirm");

    /** Static value OpenReceived for SessionStateV4. */
    public static final SessionStateV4 OPEN_RECEIVED = fromString("OpenReceived");

    /** Static value Established for SessionStateV4. */
    public static final SessionStateV4 ESTABLISHED = fromString("Established");

    /** Static value PendingAdd for SessionStateV4. */
    public static final SessionStateV4 PENDING_ADD = fromString("PendingAdd");

    /** Static value PendingUpdate for SessionStateV4. */
    public static final SessionStateV4 PENDING_UPDATE = fromString("PendingUpdate");

    /** Static value PendingRemove for SessionStateV4. */
    public static final SessionStateV4 PENDING_REMOVE = fromString("PendingRemove");

    /**
     * Creates a new instance of SessionStateV4 value.
     *
     * @deprecated Use the {@link #fromString(String)} factory method.
     */
    @Deprecated
    public SessionStateV4() {
    }

    /**
     * Creates or finds a SessionStateV4 from its string representation.
     *
     * @param name a name to look for.
     * @return the corresponding SessionStateV4.
     */
    @JsonCreator
    public static SessionStateV4 fromString(String name) {
        return fromString(name, SessionStateV4.class);
    }

    /**
     * Gets known SessionStateV4 values.
     *
     * @return known SessionStateV4 values.
     */
    public static Collection<SessionStateV4> values() {
        return values(SessionStateV4.class);
    }
}
/**
 * This method is intended to be used when giving an item (focus) and a list
 * of potentially related items we need to know which of these other items
 * are already in a specific relationship with the focus item and,
 * by exclusion which ones are not yet related.
 *
 * @param typeId The relationship type id to apply as a filter to the returned relationships
 * @param label The name of the relation as defined from the side of the 'focusItem'
 * @param focusUUID The uuid of the item to be checked on the side defined by 'relationshipLabel'
 * @param items The uuid of the items to be found on the other side of returned relationships
 * @param pageable The page information
 * @return the page of relationships of the given type that link the focus item
 *         to any of the provided items (empty page if the type id is unknown)
 * @throws SQLException If database error
 */
@SearchRestMethod(name = "byItemsAndType")
public Page<RelationshipRest> findByItemsAndType(
    @Parameter(value = "typeId", required = true) Integer typeId,
    @Parameter(value = "relationshipLabel", required = true) String label,
    @Parameter(value = "focusItem", required = true) UUID focusUUID,
    @Parameter(value = "relatedItem", required = true) Set<UUID> items,
    Pageable pageable) throws SQLException {
    Context context = obtainContext();
    int total = 0;
    List<Relationship> relationships = new LinkedList<>();
    RelationshipType relationshipType = relationshipTypeService.find(context, typeId);
    // NOTE(review): an unknown typeId silently yields an empty page rather
    // than an error — confirm this is the intended API contract.
    if (Objects.nonNull(relationshipType)) {
        // The label must match one of the two sides of the relationship type;
        // which side it matches decides the direction of the lookup below.
        if (!relationshipType.getLeftwardType().equals(label) &&
            !relationshipType.getRightwardType().equals(label)) {
            throw new UnprocessableEntityException("The provided label: " + label
                + " , does not match any relation!");
        }
        relationships = relationshipService.findByItemRelationshipTypeAndRelatedList(context, focusUUID,
            relationshipType, new ArrayList<UUID>(items), relationshipType.getLeftwardType().equals(label),
            Math.toIntExact(pageable.getOffset()),
            Math.toIntExact(pageable.getPageSize()));
        total = relationshipService.countByItemRelationshipTypeAndRelatedList(context, focusUUID,
            relationshipType, new ArrayList<UUID>(items),
            relationshipType.getLeftwardType().equals(label));
    }
    return converter.toRestPage(relationships, pageable, total, utils.obtainProjection());
}
import {deleteSchema, getJsonType} from "@tsed/common";
import {expect} from "chai";

describe("getJsonType()", () => {
  it("should return number", () => expect(getJsonType(Number)).to.eq("number"));

  it("should return string", () => expect(getJsonType(String)).to.eq("string"));

  it("should return boolean", () => expect(getJsonType(Boolean)).to.eq("boolean"));

  it("should return array", () => expect(getJsonType(Array)).to.eq("array"));

  it("should return string when date is given", () => expect(getJsonType(Date)).to.eq("string"));

  it("should return object", () => expect(getJsonType({})).to.eq("object"));

  it("should return object when class is given", () => expect(getJsonType(class {})).to.eq("object"));

  it("should return [string] when an array is given", () => expect(getJsonType(["string"])).to.deep.eq(["string"]));

  it("should return string when an string is given", () => expect(getJsonType("string")).to.deep.eq("string"));

  it("should delete a schema", () => {
    class Example {}

    getJsonType(Example);
    deleteSchema(Example);
  });
});
<gh_stars>1-10 use super::{HitRecord, Hittable, AABB}; use crate::{FloatTy, Mat44, Pt3, Ray}; pub struct TransformHittable<H: Hittable> { inner: H, transform: Mat44, inverse: Mat44, } impl<H: Hittable> TransformHittable<H> { pub fn new(inner: H, transform: Mat44) -> Self { let inverse = transform.try_inverse().unwrap(); TransformHittable { inner, transform, inverse, } } } impl<H: Hittable> Hittable for TransformHittable<H> { fn bounding_box(&self) -> Option<AABB> { let aabb = if let Some(aabb) = self.inner.bounding_box() { aabb } else { return None; }; let mut min_x = None; let mut min_y = None; let mut min_z = None; let mut max_x = None; let mut max_y = None; let mut max_z = None; let points = [aabb.min, aabb.max]; for dx in &points { for dy in &points { for dz in &points { let corner = Pt3::new(dx.x, dy.y, dz.z); let trans_corner = self.transform.transform_point(&corner); if min_x.map(|m| trans_corner.x < m).unwrap_or(true) { min_x = Some(trans_corner.x); } if min_y.map(|m| trans_corner.y < m).unwrap_or(true) { min_y = Some(trans_corner.y); } if min_z.map(|m| trans_corner.z < m).unwrap_or(true) { min_z = Some(trans_corner.z); } if max_x.map(|m| trans_corner.x > m).unwrap_or(true) { max_x = Some(trans_corner.x); } if max_y.map(|m| trans_corner.y > m).unwrap_or(true) { max_y = Some(trans_corner.y); } if max_z.map(|m| trans_corner.z > m).unwrap_or(true) { max_z = Some(trans_corner.z); } } } } let min = Pt3::new(min_x.unwrap(), min_y.unwrap(), min_z.unwrap()); let max = Pt3::new(max_x.unwrap(), max_y.unwrap(), max_z.unwrap()); Some(AABB::new(min, max)) } fn is_hit_by(&self, ray: Ray, tmin: FloatTy, tmax: Option<FloatTy>) -> Option<HitRecord> { let new_ray = Ray::new( self.inverse.transform_point(&ray.origin), self.inverse.transform_vector(&ray.direction), ); if let Some(record) = self.inner.is_hit_by(new_ray, tmin, tmax) { Some(HitRecord::new( new_ray, record.t, self.transform.transform_point(&record.p), self.transform.transform_vector(&record.normal), record.u, 
record.v, record.material, )) } else { None } } }
/****************************************************************************//**
 * @file     mc200_xflash.h
 * @brief    XFLASH driver module header file.
 * @version  V1.0.0
 * @date     18-April-2012
 * @author   CE Application Team
 *
 * @note
 * Copyright (C) 2012 Marvell Technology Group Ltd. All rights reserved.
 *
 * @par
 * Marvell is supplying this software which provides customers with programming
 * information regarding the products. Marvell has no responsibility or
 * liability for the use of the software. Marvell does not guarantee the
 * correctness of this software. Marvell reserves the right to make changes in
 * the software without notification.
 *
 *******************************************************************************/

#ifndef __MC200_XFLASH_H
#define __MC200_XFLASH_H

#include "mc200.h"
#include "mc200_driver.h"

/** @addtogroup MC200_Periph_Driver
 *  @{
 */

/** @addtogroup XFLASH
 *  @{
 */

/** @defgroup XFLASH_Public_Types XFLASH_Public_Types
 *  @{
 */

/**
 *  @brief XFLASH interface type
 */
typedef enum
{
  XFLASH_INTERFACE_QSPI1,                   /*!< QSPI1 is selected as the interface to access the flash */
}XFLASH_Interface_Type;

/**
 *  @brief XFLASH protection type
 *  @note  Enum values encode the block-protect bit patterns written to the
 *         flash status register; address ranges assume the 1MB part.
 */
typedef enum
{
  XFLASH_PROT_NONE          = 0x00,         /*!< None protection */
  XFLASH_PROT_UPPER_64KB    = 0x04,         /*!< Protect upper 64KB   0x0F0000 ~ 0x0FFFFF */
  XFLASH_PROT_UPPER_128KB   = 0x08,         /*!< Protect upper 128KB  0x0E0000 ~ 0x0FFFFF */
  XFLASH_PROT_UPPER_256KB   = 0x0C,         /*!< Protect upper 256KB  0x0C0000 ~ 0x0FFFFF */
  XFLASH_PROT_UPPER_512KB   = 0x10,         /*!< Protect upper 512KB  0x080000 ~ 0x0FFFFF */
  XFLASH_PROT_LOWER_64KB    = 0x24,         /*!< Protect lower 64KB   0x000000 ~ 0x00FFFF */
  XFLASH_PROT_LOWER_128KB   = 0x28,         /*!< Protect lower 128KB  0x000000 ~ 0x01FFFF */
  XFLASH_PROT_LOWER_256KB   = 0x2C,         /*!< Protect lower 256KB  0x000000 ~ 0x03FFFF */
  XFLASH_PROT_LOWER_512KB   = 0x30,         /*!< Protect lower 512KB  0x000000 ~ 0x07FFFF */
  XFLASH_PROT_ALL           = 0x14,         /*!< Protect all          0x000000 ~ 0x0FFFFF */
  XFLASH_PROT_UPPER_4KB     = 0x44,         /*!< Protect upper 4KB    0x0FF000 ~ 0x0FFFFF */
  XFLASH_PROT_UPPER_8KB     = 0x48,         /*!< Protect upper 8KB    0x0FE000 ~ 0x0FFFFF */
  XFLASH_PROT_UPPER_16KB    = 0x4C,         /*!< Protect upper 16KB   0x0FC000 ~ 0x0FFFFF */
  XFLASH_PROT_UPPER_32KB    = 0x50,         /*!< Protect upper 32KB   0x0F8000 ~ 0x0FFFFF */
  XFLASH_PROT_LOWER_4KB     = 0x64,         /*!< Protect lower 4KB    0x000000 ~ 0x000FFF */
  XFLASH_PROT_LOWER_8KB     = 0x68,         /*!< Protect lower 8KB    0x000000 ~ 0x001FFF */
  XFLASH_PROT_LOWER_16KB    = 0x6C,         /*!< Protect lower 16KB   0x000000 ~ 0x003FFF */
  XFLASH_PROT_LOWER_32KB    = 0x70,         /*!< Protect lower 32KB   0x000000 ~ 0x007FFF */
  XFLASH_PROT_LOWER_960KB   = 0x84,         /*!< Protect lower 960KB  0x000000 ~ 0x0EFFFF */
  XFLASH_PROT_LOWER_896KB   = 0x88,         /*!< Protect lower 896KB  0x000000 ~ 0x0DFFFF */
  XFLASH_PROT_LOWER_768KB   = 0x8C,         /*!< Protect lower 768KB  0x000000 ~ 0x0BFFFF */
  XFLASH_PROT_UPPER_960KB   = 0xA4,         /*!< Protect upper 960KB  0x010000 ~ 0x0FFFFF */
  XFLASH_PROT_UPPER_896KB   = 0xA8,         /*!< Protect upper 896KB  0x020000 ~ 0x0FFFFF */
  XFLASH_PROT_UPPER_768KB   = 0xAC,         /*!< Protect upper 768KB  0x040000 ~ 0x0FFFFF */
  XFLASH_PROT_LOWER_1020KB  = 0xC4,         /*!< Protect lower 1020KB 0x000000 ~ 0x0FEFFF */
  XFLASH_PROT_LOWER_1016KB  = 0xC8,         /*!< Protect lower 1016KB 0x000000 ~ 0x0FDFFF */
  XFLASH_PROT_LOWER_1008KB  = 0xCC,         /*!< Protect lower 1008KB 0x000000 ~ 0x0FBFFF */
  XFLASH_PROT_LOWER_992KB   = 0xD0,         /*!< Protect lower 992KB  0x000000 ~ 0x0F7FFF */
  XFLASH_PROT_UPPER_1020KB  = 0xE4,         /*!< Protect upper 1020KB 0x001000 ~ 0x0FFFFF */
  XFLASH_PROT_UPPER_1016KB  = 0xE8,         /*!< Protect upper 1016KB 0x002000 ~ 0x0FFFFF */
  XFLASH_PROT_UPPER_1008KB  = 0xEC,         /*!< Protect upper 1008KB 0x004000 ~ 0x0FFFFF */
  XFLASH_PROT_UPPER_992KB   = 0xF0,         /*!< Protect upper 992KB  0x008000 ~ 0x0FFFFF */
}XFLASH_Protection_Type;

/**
 *  @brief XFLASH read mode
 */
typedef enum
{
  XFLASH_NORMAL_READ,                       /*!< Normal read mode */
  XFLASH_FAST_READ,                         /*!< Fast read mode */
  XFLASH_FAST_READ_DUAL_OUT,                /*!< Fast read dual output mode */
  XFLASH_FAST_READ_DUAL_IO,                 /*!< Fast read dual IO mode */
  XFLASH_FAST_READ_QUAD_OUT,                /*!< Fast read quad output mode */
  XFLASH_FAST_READ_QUAD_IO,                 /*!< Fast read quad IO mode */
  XFLASH_WORD_FAST_READ_QUAD_IO,            /*!< Word fast read quad IO mode */
  XFLASH_OCTAL_WORD_FAST_READ_QUAD_IO,      /*!< Octal word fast read quad IO mode */
}XFLASH_ReadMode_Type;

/**
 *  @brief XFLASH program mode
 */
typedef enum
{
  XFLASH_PROGRAM_NORMAL,                    /*!< Normal page program mode */
  XFLASH_PROGRAM_QUAD,                      /*!< Quad page program mode */
}XFLASH_ProgramMode_Type;

/*@} end of group XFLASH_Public_Types definitions */

/** @defgroup XFLASH_CONSTANTS
 *  @{
 */

#define XFLASH_PAGE_SIZE           0x100    /*!< 256 bytes */
#define XFLASH_SECTOR_SIZE         0x1000   /*!< 4KB */
#define XFLASH_32K_BLOCK_SIZE      0x8000   /*!< 32KB */
#define XFLASH_64K_BLOCK_SIZE      0x10000  /*!< 64KB */
#define XFLASH_CHIP_SIZE           0x100000 /*!< 1MB */

#define XFLASH_LAST_SECTOR         ((XFLASH_CHIP_SIZE/XFLASH_SECTOR_SIZE) - 1)
#define XFLASH_LAST_32K_BLOCK      ((XFLASH_CHIP_SIZE/XFLASH_32K_BLOCK_SIZE) - 1)
#define XFLASH_LAST_64K_BLOCK      ((XFLASH_CHIP_SIZE/XFLASH_64K_BLOCK_SIZE) - 1)

/*@} end of group XFLASH_CONSTANTS */

/** @defgroup XFLASH_Public_Constants
 *  @{
 */

/*@} end of group XFLASH_Public_Constants */

/** @defgroup XFLASH_Public_Macro
 *  @{
 */

/*@} end of group XFLASH_Public_Macro */

/** @defgroup XFLASH_Public_FunctionDeclaration
 *  @brief XFLASH functions statement
 *  @{
 */

void XFLASH_SelectInterface(XFLASH_Interface_Type interface);
void XFLASH_PowerDown(FunctionalState newCmd);
Status XFLASH_SetProtectionMode(XFLASH_Protection_Type protectMode);
Status XFLASH_EraseAll(void);
Status XFLASH_SectorErase(uint32_t sectorNumber);
Status XFLASH_Block32KErase(uint32_t blockNumber);
Status XFLASH_Block64KErase(uint32_t blockNumber);
Status XFLASH_Erase(uint32_t startAddr, uint32_t endAddr);
uint32_t XFLASH_Read(XFLASH_ReadMode_Type readMode, uint32_t address, uint8_t *buffer, uint32_t num);
uint32_t XFLASH_WordRead(XFLASH_ReadMode_Type readMode, uint32_t address);
uint8_t XFLASH_ByteRead(XFLASH_ReadMode_Type readMode, uint32_t address);
Status XFLASH_PageWrite(XFLASH_ProgramMode_Type programMode, uint32_t address, uint8_t *buffer, uint32_t num);
Status XFLASH_Write(XFLASH_ProgramMode_Type programMode, uint32_t address, uint8_t *buffer, uint32_t num);
Status XFLASH_WordWrite(XFLASH_ProgramMode_Type programMode, uint32_t address, uint32_t data);
Status XFLASH_ByteWrite(XFLASH_ProgramMode_Type programMode, uint32_t address, uint8_t data);
uint64_t XFLASH_GetUniqueID(void);

/*@} end of group XFLASH_Public_FunctionDeclaration */

/*@} end of group XFLASH */

/*@} end of group MC200_Periph_Driver */

#endif /* __MC200_XFLASH_H */
// Draws two copies of the format bits (with its own error correction code) based // on the given mask and error correction level. This always draws all modules of // the format bits, unlike drawWhiteFunctionModules() which might skip black modules. static void drawFormatBits(enum qrcodegen_Ecc ecl, enum qrcodegen_Mask mask, uint8_t qrcode[]) { assert(0 <= (int)mask && (int)mask <= 7); int data; switch (ecl) { case qrcodegen_Ecc_LOW : data = 1; break; case qrcodegen_Ecc_MEDIUM : data = 0; break; case qrcodegen_Ecc_QUARTILE: data = 3; break; case qrcodegen_Ecc_HIGH : data = 2; break; default: assert(false); } data = data << 3 | (int)mask; int rem = data; for (int i = 0; i < 10; i++) rem = (rem << 1) ^ ((rem >> 9) * 0x537); data = data << 10 | rem; data ^= 0x5412; assert(data >> 15 == 0); for (int i = 0; i <= 5; i++) setModule(qrcode, 8, i, ((data >> i) & 1) != 0); setModule(qrcode, 8, 7, ((data >> 6) & 1) != 0); setModule(qrcode, 8, 8, ((data >> 7) & 1) != 0); setModule(qrcode, 7, 8, ((data >> 8) & 1) != 0); for (int i = 9; i < 15; i++) setModule(qrcode, 14 - i, 8, ((data >> i) & 1) != 0); int qrsize = qrcodegen_getSize(qrcode); for (int i = 0; i <= 7; i++) setModule(qrcode, qrsize - 1 - i, 8, ((data >> i) & 1) != 0); for (int i = 8; i < 15; i++) setModule(qrcode, 8, qrsize - 15 + i, ((data >> i) & 1) != 0); setModule(qrcode, 8, qrsize - 8, true); }
package main

import (
	"bufio"
	"bytes"
	"context"
	"crypto/tls"
	"crypto/x509"
	"errors"
	"fmt"
	"io/ioutil"
	"math"
	"math/rand"
	"net"
	"net/http"
	"net/url"
	"os"
	"strings"
	"time"

	b64 "encoding/base64"

	"github.com/ShowMax/go-fqdn"
	"github.com/cyralinc/pushprox/util"
	"github.com/cyralinc/wrapper-utils/iplookup"
	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/log/level"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"github.com/prometheus/common/promlog"
	"github.com/prometheus/common/promlog/flag"
	"gopkg.in/alecthomas/kingpin.v2"
)

const (
	ConfigPushClientDefaultValue = "http://localhost:8050"
	ConfigFilePath               = "config-client.yaml"
	EnvFqdnKey                   = "CYRAL_PUSH_CLIENT_FQDN"
	EnvProxyURL                  = "CYRAL_PUSH_CLIENT_PROXY_URL"
	EnvASGInstanceID             = "CYRAL_PUSH_CLIENT_ASG_INSTANCE_ID"
	defaultMaxDelayBetweenPolls  = "0"
)

// Command-line flags; several can be overridden via CYRAL_PUSH_CLIENT_* env vars.
var (
	myFqdn               = kingpin.Flag("fqdn", "FQDN to register with").Default(fqdn.Get()).OverrideDefaultFromEnvar(EnvFqdnKey).String()
	scrapeTargetHost     = kingpin.Flag("scrape-target-host", "The target host to scrape").Default("").String()
	proxyURL             = kingpin.Flag("proxy-url", "Push proxy to talk to.").Default(ConfigPushClientDefaultValue).OverrideDefaultFromEnvar(EnvProxyURL).String()
	caCertFile           = kingpin.Flag("tls.cacert", "<file> CA certificate to verify peer against").String()
	tlsCert              = kingpin.Flag("tls.cert", "<cert> Client certificate file").String()
	tlsKey               = kingpin.Flag("tls.key", "<key> Private key file").String()
	metricsAddr          = kingpin.Flag("metrics-addr", "Serve Prometheus metrics at this address").Default(":9369").String()
	configFilePath       = kingpin.Flag("config-file", "Config file path (Unused)").Default(ConfigFilePath).String()
	maxDelayBetweenPolls = kingpin.Flag("max-delay-between-polls", "Max delay in seconds between poll requests").Default(defaultMaxDelayBetweenPolls).Int64()
	asgInstanceID        = kingpin.Flag("asg-instance-id", "Auto scaling group instance id").Default("").OverrideDefaultFromEnvar(EnvASGInstanceID).String()
	useInstanceID        = kingpin.Flag("use-instance-id", "use instance id in poll requests").Default("true").Bool()
)

// Self-monitoring counters exposed on the metrics endpoint.
var (
	scrapeErrorCounter = prometheus.NewCounter(
		prometheus.CounterOpts{
			Name: "pushprox_client_scrape_errors_total",
			Help: "Number of scrape errors",
		},
	)
	pushErrorCounter = prometheus.NewCounter(
		prometheus.CounterOpts{
			Name: "pushprox_client_push_errors_total",
			Help: "Number of push errors",
		},
	)
	pollErrorCounter = prometheus.NewCounter(
		prometheus.CounterOpts{
			Name: "pushprox_client_poll_errors_total",
			Help: "Number of poll errors",
		},
	)
)

func init() {
	prometheus.MustRegister(pushErrorCounter, pollErrorCounter, scrapeErrorCounter)
}

// Coordinator for scrape requests and responses
type Coordinator struct {
	logger log.Logger
}

// getIDAsB64 base64-encodes an ASG instance id for embedding in the FQDN.
func getIDAsB64(asgID string) string {
	return b64.StdEncoding.EncodeToString([]byte(asgID))
}

// doScrape executes a scrape request received from the proxy and pushes the
// result (or a synthetic 500 response on failure) back to the proxy.
func (c *Coordinator) doScrape(request *http.Request, client *http.Client) {
	logger := log.With(c.logger, "scrape_id", request.Header.Get("id"))
	// Timeout comes from a header set by the proxy; parse errors are ignored
	// and yield the zero duration.
	timeout, _ := util.GetHeaderTimeout(request.Header)
	ctx, cancel := context.WithTimeout(request.Context(), timeout)
	defer cancel()
	request = request.WithContext(ctx)
	// We cannot handle https requests at the proxy, as we would only
	// see a CONNECT, so use a URL parameter to trigger it.
	params := request.URL.Query()
	if params.Get("_scheme") == "https" {
		request.URL.Scheme = "https"
		params.Del("_scheme")
		request.URL.RawQuery = params.Encode()
	}

	scrapeResp, err := client.Do(request)
	if err != nil {
		// Scrape failed: report a synthetic 500 response to the proxy so the
		// server side still sees an answer for this scrape id.
		msg := fmt.Sprintf("Failed to scrape %s: %s", request.URL.String(), err)
		level.Warn(logger).Log("msg", "Failed to scrape", "Request URL", request.URL.String(), "err", err)
		scrapeErrorCounter.Inc()
		resp := &http.Response{
			StatusCode: 500,
			Header:     http.Header{},
			Body:       ioutil.NopCloser(strings.NewReader(msg)),
		}
		err = c.doPush(resp, request, client)
		if err != nil {
			pushErrorCounter.Inc()
			level.Warn(logger).Log("msg", "Failed to push failed scrape response:", "err", err)
			return
		}
		level.Info(logger).Log("msg", "Pushed failed scrape response")
		return
	}
	level.Debug(logger).Log("msg", "Retrieved scrape response")
	err = c.doPush(scrapeResp, request, client)
	if err != nil {
		pushErrorCounter.Inc()
		level.Warn(logger).Log("msg", "Failed to push scrape response:", "err", err)
		return
	}
	level.Debug(logger).Log("msg", "Pushed scrape result")
}

// Report the result of the scrape back up to the proxy.
func (c *Coordinator) doPush(resp *http.Response, origRequest *http.Request, client *http.Client) error {
	resp.Header.Set("id", origRequest.Header.Get("id")) // Link the request and response
	// Remaining scrape deadline.
	deadline, _ := origRequest.Context().Deadline()
	resp.Header.Set("X-Prometheus-Scrape-Timeout", fmt.Sprintf("%f", float64(time.Until(deadline))/1e9))

	base, err := url.Parse(*proxyURL)
	if err != nil {
		return err
	}
	u, err := url.Parse("push")
	if err != nil {
		return err
	}
	url := base.ResolveReference(u)

	// Serialize the full HTTP response (status line, headers, body) as the
	// POST payload.
	// NOTE(review): the error from resp.Write is discarded — a serialization
	// failure would push a truncated body; consider checking it.
	buf := &bytes.Buffer{}
	resp.Write(buf)
	request := &http.Request{
		Method:        "POST",
		URL:           url,
		Body:          ioutil.NopCloser(buf),
		ContentLength: int64(buf.Len()),
	}
	request = request.WithContext(origRequest.Context())
	_, err = client.Do(request)
	if err != nil {
		return err
	}
	return nil
}

// loop performs one poll cycle: POST the fqdn to the proxy's /poll endpoint,
// read back one serialized scrape request, optionally rewrite its target
// host, and dispatch the scrape on a goroutine.
func loop(fqdn string, c Coordinator, t *http.Transport) error {
	client := &http.Client{Transport: t}
	base, err := url.Parse(*proxyURL)
	if err != nil {
		level.Error(c.logger).Log("msg", "Error parsing url:", "err", err)
		return errors.New("error parsing url")
	}
	u, err := url.Parse("poll")
	if err != nil {
		level.Error(c.logger).Log("msg", "Error parsing url:", "err", err)
		return errors.New("error parsing url poll")
	}
	url := base.ResolveReference(u)
	resp, err := client.Post(url.String(), "", strings.NewReader(fqdn))
	if err != nil {
		level.Error(c.logger).Log("msg", "Error polling:", "err", err)
		return errors.New("error polling")
	}
	defer resp.Body.Close()
	request, err := http.ReadRequest(bufio.NewReader(resp.Body))
	if err != nil {
		level.Error(c.logger).Log("msg", "Error reading request:", "err", err)
		return errors.New("error reading request")
	}
	level.Debug(c.logger).Log("msg", "Got scrape request", "scrape_id", request.Header.Get("id"), "url", request.URL)

	request.RequestURI = ""

	if *scrapeTargetHost != "" {
		request.URL = util.ReplaceUrlHost(request.URL, *scrapeTargetHost)
		level.Info(c.logger).Log("msg", "Modified scrape target", "scrape_id", request.Header.Get("id"), "url", request.URL)
	}

	go c.doScrape(request, client)

	return nil
}

// generates delay in a range
type variationDelay struct {
	min float64 // lower bound, in nanoseconds
	max float64 // upper bound, in nanoseconds
}

// newVariationDelay constructs variationDelay object with a range
func newVariationDelay() variationDelay {
	return variationDelay{
		min: 5e8, //500ms
		max: float64(time.Duration(*maxDelayBetweenPolls) * time.Second), //20s
	}
}

// sleep will sleep for a random duration between min and max (no-op when
// max < min, e.g. when --max-delay-between-polls is 0).
func (v *variationDelay) sleep() {
	if v.max < v.min {
		return
	}
	// Durations here are float64 nanoseconds.
	d := math.Min(v.max, v.min+(rand.Float64()*v.max))
	time.Sleep(time.Duration(d))
}

// decorrelated Jitter increases the maximum jitter based on the last random value.
type decorrelatedJitter struct {
	duration float64 // sleep time
	min      float64 // min sleep time
	cap      float64 // max sleep time
}

func newJitter() decorrelatedJitter {
	return decorrelatedJitter{
		duration: 15e9,
		min:      15e9, //15s
		cap:      30e9,
	}
}

// calc advances the jitter: next duration depends on the previous one,
// clamped to [min, cap].
func (d *decorrelatedJitter) calc() {
	d.duration = math.Min(d.cap, d.min+rand.Float64()*(d.duration*3-d.min))
}

func (d *decorrelatedJitter) sleep() {
	d.calc()
	time.Sleep(time.Duration(d.duration))
}

// getASGInstanceID returns the configured ASG instance id, falling back to
// this host's IPv4 address when none was provided.
func getASGInstanceID() string {
	if *asgInstanceID == "" {
		return iplookup.FindMyIPV4()
	} else {
		return *asgInstanceID
	}
}

func getASGInstanceIDInBase64() string {
	return getIDAsB64(getASGInstanceID())
}

// getFQDN returns concatenation of asg instance id and input fqdn
func getFQDN() string {
	if *useInstanceID {
		return getASGInstanceIDInBase64() + "." 
+ *myFqdn } return *myFqdn } func main() { promlogConfig := promlog.Config{} flag.AddFlags(kingpin.CommandLine, &promlogConfig) kingpin.HelpFlag.Short('h') kingpin.Parse() logger := promlog.New(&promlogConfig) //level.AllowDebug() coordinator := Coordinator{logger: logger} if *proxyURL == "" { level.Error(coordinator.logger).Log("msg", "--proxy-url flag must be specified.") os.Exit(1) } // Make sure proxyURL ends with a single '/' *proxyURL = strings.TrimRight(*proxyURL, "/") + "/" level.Info(coordinator.logger).Log("msg", "URL, InstanceID, FQDN info", "proxy_url", *proxyURL, "instanceID", getASGInstanceID(), "instanceIDBas64", getASGInstanceIDInBase64(), "fqdn", *myFqdn) tlsConfig := &tls.Config{} if *tlsCert != "" { cert, err := tls.LoadX509KeyPair(*tlsCert, *tlsKey) if err != nil { level.Error(coordinator.logger).Log("msg", "Certificate or Key is invalid", "err", err) os.Exit(1) } // Setup HTTPS client tlsConfig.Certificates = []tls.Certificate{cert} tlsConfig.BuildNameToCertificate() } if *caCertFile != "" { caCert, err := ioutil.ReadFile(*caCertFile) if err != nil { level.Error(coordinator.logger).Log("msg", "Not able to read cacert file", "err", err) os.Exit(1) } caCertPool := x509.NewCertPool() if ok := caCertPool.AppendCertsFromPEM(caCert); !ok { level.Error(coordinator.logger).Log("msg", "Failed to use cacert file as ca certificate") os.Exit(1) } tlsConfig.RootCAs = caCertPool } if *metricsAddr != "" { go func() { if err := http.ListenAndServe(*metricsAddr, promhttp.Handler()); err != nil { level.Warn(coordinator.logger).Log("msg", "ListenAndServe", "err", err) } }() } transport := &http.Transport{ Proxy: http.ProxyFromEnvironment, DialContext: (&net.Dialer{ Timeout: 30 * time.Second, KeepAlive: 30 * time.Second, DualStack: true, }).DialContext, MaxIdleConns: 100, IdleConnTimeout: 90 * time.Second, TLSHandshakeTimeout: 10 * time.Second, ExpectContinueTimeout: 1 * time.Second, TLSClientConfig: tlsConfig, } rand.Seed(time.Now().UnixNano()) variationDelay := 
newVariationDelay() jitter := newJitter() fqdn := getFQDN() for { err := loop(fqdn, coordinator, transport) if err != nil { pollErrorCounter.Inc() jitter.sleep() continue } variationDelay.sleep() } }
<gh_stars>1-10
// Shape of the Koa middleware context object consumed by this module.
// NOTE(review): `IncomingMessage` (node:http) is referenced but its import is
// not visible in this chunk — confirm it is imported elsewhere in the file.
interface KoaContext {
  path: string,              // request path of the incoming call
  req: IncomingMessage,      // underlying Node HTTP request
  [propname: string]: any,   // Koa attaches arbitrary additional properties
}
# Copyright (C) 2020 FireEye, Inc. All Rights Reserved. import speakeasy.winenv.defs.nt.ddk as ddk from speakeasy.winenv.api import api class Usbd(api.ApiHandler): """ Implements the USB stack driver (USBD.sys) """ name = 'usbd' apihook = api.ApiHandler.apihook impdata = api.ApiHandler.impdata class ApiEmuError(Exception): pass def __init__(self, emu): super(Usbd, self).__init__(emu) self.funcs = {} self.data = {} super(Usbd, self).__get_hook_attrs__(self) @apihook('USBD_ValidateConfigurationDescriptor', argc=5) def USBD_ValidateConfigurationDescriptor(self, emu, argv, ctx={}): """ USBD_STATUS USBD_ValidateConfigurationDescriptor( PUSB_CONFIGURATION_DESCRIPTOR ConfigDesc, ULONG BufferLength, USHORT Level, PUCHAR *Offset, ULONG Tag ); """ rv = ddk.STATUS_SUCCESS ConfigDesc, BufferLength, Level, Offset, Tag = argv return rv
Adam lived 930 years (Gen. 5:5)--"And all the days that Adam lived were nine hundred and thirty years: and he died."

Seth lived 912 years (Gen. 5:8)--"And all the days of Seth were nine hundred and twelve years: and he died."

Methuselah lived 969 years (Gen. 5:27)--"And all the days of Methuselah were nine hundred sixty and nine years: and he died."

After the fall, the genetic line of Adam and his descendants was very pure, so their health would have been incredible. Living that long would not have been a problem. Also, some theologians think that there was a canopy of water that engulfed the entire earth, and that it was released at the time of the flood. "In the six hundredth year of Noah's life, in the second month, on the seventeenth day of the month, on the same day all the fountains of the great deep burst open, and the floodgates of the sky were opened." (Gen. 7:11). The "floodgates of the sky" are sometimes alluded to as great amounts of water suspended in the sky. Also, no rain is recorded in the Bible until after the flood, which seems to support this idea. This canopy, if the theory is true, might have provided some sort of protection from the sun's harmful rays. We can't know for sure, and it is only a theory. Nevertheless, after the flood, the lifespan of people on earth was drastically reduced. "Then the LORD said, 'My Spirit shall not strive with man forever, because he also is flesh; nevertheless his days shall be one hundred and twenty years.'" (Gen. 6:3). Whether or not the removal of this canopy had any effect on human lifespan may never be known.

This article is also available in: Español, Indonesia
// Pipe is a utility to pipe from the reader to the writer, and // writes the error to res (may be nil). func Pipe(reader, writer SSConn, buf *SSBuffer, res chan error) { var err error defer func() { res <- err }() for { if err = writer.SSWrite(buf); err != nil { return } err = reader.SSRead(buf) if err == io.EOF { break } else if err != nil { return } } if err = writer.SSWrite(buf); err != nil { return } }
# -*- coding: utf-8 -*-
# Usage: py.test tests

import sys
import os
import json
import tempfile
import shutil
import collections

import pytest

from os import path

from keyvi.compiler import JsonDictionaryCompiler, JsonDictionaryMerger
from keyvi.dictionary import Dictionary
from keyvi import loading_strategy_types

# Make the parent directory importable for shared test helpers.
root = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(root, "../"))

# Three overlapping input sets. Duplicate keys ('a' in sets 1 and 2, 'd' in
# sets 2 and 3) exercise the merger's override behaviour: the expected result
# below is built with dict.update(), so later sets win.
key_values_1 = {
    'a' : {"a": 2},
    'bzzzz' : {"b": 2},
    'bbb' : {"b": 2},
    'b' : {"b": 2}
}

key_values_2 = {
    'a' : {"ab": 23},
    'c' : {"c": 2},
    'i' : {"c": 2},
    'ia' : {"c": 2},
    'd' : {"d": 2}
}

key_values_3 = {
    'e' : {"e": 2},
    'd' : {"dddd": 1233},
    'e1' : {"e": 2},
    'e2' : {"e": 22},
    'e3' : {"e": 21},
    'e4' : {"e": 2},
    'f' : {"f": 2}
}


def generate_keyvi(key_values, filename):
    # Compile the given mapping into a keyvi dictionary file on disk,
    # JSON-encoding each value.
    dictionary_compiler = JsonDictionaryCompiler({"memory_limit_mb":"10"})
    for key, value in key_values.items():
        dictionary_compiler.Add(key, json.dumps(value))

    dictionary_compiler.Compile()
    dictionary_compiler.WriteToFile(filename)


# Run the merge test once per merge mode (default and 'append').
@pytest.mark.parametrize('merger', [JsonDictionaryMerger({"memory_limit_mb":"10"}),
                                    JsonDictionaryMerger({"memory_limit_mb":"10", 'merge_mode': 'append'})])
def test_merge(merger):
    # Merge three compiled dictionaries, then compare the merged output
    # against the key-sorted union of the inputs.
    tmp_dir = tempfile.mkdtemp()
    try:
        file_1 = path.join(tmp_dir, 'test_merger_1.kv')
        file_2 = path.join(tmp_dir, 'test_merger_2.kv')
        file_3 = path.join(tmp_dir, 'test_merger_3.kv')
        merge_file = path.join(tmp_dir, 'merge.kv')

        generate_keyvi(key_values_1, file_1)
        generate_keyvi(key_values_2, file_2)
        generate_keyvi(key_values_3, file_3)

        merger.Add(file_1)
        merger.Add(file_2)
        merger.Add(file_3)
        merger.Merge(merge_file)

        merged_dictionary = Dictionary(merge_file, loading_strategy_types.populate_lazy)

        # Expected result: union of the inputs with later sets overriding,
        # sorted by key (GetAllItems is assumed to iterate in key order —
        # TODO confirm against keyvi docs).
        key_values = {}
        key_values.update(key_values_1)
        key_values.update(key_values_2)
        key_values.update(key_values_3)
        key_values_ordered = collections.OrderedDict(sorted(key_values.items()))

        # NOTE(review): zip() stops at the shorter sequence, so a merged
        # dictionary with missing trailing keys would still pass — consider
        # also asserting equal item counts.
        for (base_key, base_value), (keyvi_key, keyvi_value) in zip(key_values_ordered.items(), merged_dictionary.GetAllItems()):
            assert base_key == keyvi_key
            assert base_value == keyvi_value

    finally:
        shutil.rmtree(tmp_dir)
# Reads a flower count and a row of flower heights from stdin, then prints
# the minimum number of horizontal watering strokes needed to reach those
# heights (classic contiguous-segment increment problem).
#
# Fix: removed the unused `hdebug` debug copy of the height list.
n = int(input())  # number of flowers (the list length is used directly below)
hl = list(map(int, input().split()))

c = 0

# The global minimum height is shared by every position, so it can be
# covered with `mi` full-width strokes; subtract it out everywhere.
mi = min(hl)
c += mi
if mi != 0:
    for i in range(len(hl)):
        hl[i] -= mi

# Zero sentinel so every trailing run of non-zero heights is terminated.
hl.append(0)

# Each outer pass peels the minimum off the first remaining run of
# non-zero heights (`a` marks the run start; 101 means "no run open").
# Heights and counts are small, so 100 passes are sufficient — presumably
# bounded by the problem's input constraints (TODO confirm).
for x in range(100):
    a = 101
    for i in range(len(hl)):
        if hl[i] != 0 and a > hl[i] and a == 101:
            # First non-zero element while no run is open: open a run here.
            a = i
        if hl[i] == 0 and a < i:
            # Run [a, i) just closed: peel off its minimum height with
            # one stroke per unit of that minimum.
            mi2 = min(hl[a:i])
            c += mi2
            for j in range(a, i):
                hl[j] -= mi2

print(c)
// Help methods for tesing pages with multiple showads snippets. GoogleString GetHtmlPageMultipleShowAds() { return GetPage( StrCat(GetShowAdsDataSnippetWithContent(GetShowAdsDataContent1()), kShowAdsApiCall, GetShowAdsDataSnippetWithContent(kShowAdsDataContentFormat2), kShowAdsApiCall)); }
Russia is not planning to supply Syria with any weapons beyond the current contracts that are nearing completion, Russian Foreign Minister Sergey Lavrov said, refuting speculation that Moscow was going to sell S-300 air defense systems to Damascus.

“Russia does not plan to sell,” Lavrov told reporters when asked about rumors concerning S-300 air defense systems. He stressed that Russia has only been fulfilling contracts that have already been signed with Syria for defensive weapons.

On Thursday the White House urged Russia not to sell weapons to Syria, saying that such assistance to Damascus is "particularly destabilizing to the region."

“I think we’ve made it crystal clear we would prefer that Russia was not supplying assistance” to the Syrian regime in its war against opposition forces, US Secretary of State John Kerry said in Rome Thursday.

The warning from the US came a day after the Wall Street Journal reported that Israel had informed the Obama administration about Moscow’s alleged plans to transfer S-300 missile batteries to Syrian security forces, possibly as early as this summer.

Kerry did not directly respond to the report, but said the United States has repeatedly opposed arms deals in the past. He reiterated the US position against transfers of missile systems to Syria because of the possible threat to Israel.

According to the report, Israel informed the US that Syria made payments on a 2010 deal to buy four batteries from Russia for $900 million. That included six launchers and 144 missiles, each with a range of 125 miles (200 kilometers).

Despite repeated accusations it has tried to deliver military equipment to Syria, Moscow maintains that it is only fulfilling contracts that are already signed, which are for defensive weapons.

Speaking to German media in Hannover in April, Russian President Vladimir Putin responded to the allegations, saying that Russia only “supplies the legitimate regime...
This is not prohibited by international law.”

Russian Foreign Minister Lavrov has also insisted that Russia has no plans to sign any further military contracts with Syria, and now only honors old contracts, many of which date back to the Soviet era.

Following an incident in which a Russian ship was intercepted carrying helicopters for Damascus, then-US Secretary of State Hillary Clinton slammed Russia for sending “combat” helicopters to the Syrian government, but later the statement was retracted, with the US acknowledging they were “old helicopters” refurbished under previous contracts.

Russia and the US have been in a deadlock over the Syrian conflict and have thus far failed to agree on an approach to resolve the two-year-long crisis in the war-torn country. While Washington insists that Assad and his regime must step down, Moscow maintains that only the Syrian people should decide and foreign interference should not be the determining factor.

Russia insists that only direct talks between parties involved in the conflict – the government and the opposition – can bring an end to the fighting, which according to the UN has claimed the lives of more than 70,000 people.
package git

import (
	"fmt"
	"os/exec"
	"strings"
)

// execCommand is indirected so tests can substitute a fake command runner.
var execCommand = exec.Command

// Tags returns the list of git tags as a string slice.
//
// Fix: a repository with no tags now yields an empty slice; previously
// strings.Split on the empty (trimmed) output produced the surprising [""].
func Tags() ([]string, error) {
	cmd := execCommand("git", "tag")
	out, err := cmd.Output()
	if err != nil {
		return nil, fmt.Errorf("fetching git tags: %v", err)
	}
	trimmed := strings.TrimSpace(string(out))
	if trimmed == "" {
		return []string{}, nil
	}
	return strings.Split(trimmed, "\n"), nil
}

// Tag calls git to create a new tag from a string.
func Tag(tag string) error {
	cmd := execCommand("git", "tag", tag)
	if _, err := cmd.Output(); err != nil {
		return fmt.Errorf("tagging the commit in git: %v", err)
	}
	return nil
}

// LastCommit gets the last commit SHA; short selects the abbreviated form
// (git rev-parse --short).
func LastCommit(short bool) (string, error) {
	args := []string{"rev-parse", "HEAD"}
	if short {
		args = []string{"rev-parse", "--short", "HEAD"}
	}
	cmd := execCommand("git", args...)
	out, err := cmd.Output()
	if err != nil {
		return "", fmt.Errorf("fetching git commit: %v", err)
	}
	return strings.TrimSpace(string(out)), nil
}

// LastCommitMessage gets the last commit message (git log -1 --pretty=%B).
func LastCommitMessage() (string, error) {
	cmd := execCommand("git", "log", "-1", "--pretty=%B")
	out, err := cmd.Output()
	if err != nil {
		return "", fmt.Errorf("fetching git commit message: %v", err)
	}
	return strings.TrimSpace(string(out)), nil
}

// Tagged reports whether the current HEAD commit has at least one tag.
//
// Fix: errors from `git tag --contains` are now propagated; the previous
// implementation returned (false, nil) on failure, silently masking real
// git errors from callers.
func Tagged() (bool, error) {
	commit, err := LastCommit(false)
	if err != nil {
		return false, fmt.Errorf("checking current tag: %v", err)
	}
	cmd := execCommand("git", "tag", "--contains", commit)
	t, err := cmd.Output()
	if err != nil {
		return false, fmt.Errorf("listing tags containing commit: %v", err)
	}
	return len(string(t)) > 0, nil
}
from sympy.core.basic import Basic
from sympy.core.numbers import Rational
from sympy.core.singleton import S, Singleton


def test_Singleton():
    # Verify that a Singleton-metaclass class is instantiated lazily and
    # exactly once, and that the instance is reachable via the S registry.
    global instantiated
    instantiated = 0

    class MySingleton(Basic, metaclass=Singleton):
        def __new__(cls):
            global instantiated
            instantiated += 1
            return Basic.__new__(cls)

    assert instantiated == 0
    MySingleton() # force instantiation
    assert instantiated == 1
    assert MySingleton() is not Basic()
    assert MySingleton() is MySingleton()
    assert S.MySingleton is MySingleton()
    assert instantiated == 1

    # A subclass gets its own, independent singleton instance.
    class MySingleton_sub(MySingleton):
        pass
    assert instantiated == 1
    MySingleton_sub()
    assert instantiated == 2
    assert MySingleton_sub() is not MySingleton()
    assert MySingleton_sub() is MySingleton_sub()


def test_singleton_redefinition():
    # Redefining a singleton class under the same name must rebind the
    # S registry entry to the newest definition.
    class TestSingleton(Basic, metaclass=Singleton):
        pass

    assert TestSingleton() is S.TestSingleton

    class TestSingleton(Basic, metaclass=Singleton):
        pass

    assert TestSingleton() is S.TestSingleton


def test_names_in_namespace():
    # Every singleton name should be accessible from the 'from sympy import *'
    # namespace in addition to the S object. However, it does not need to be
    # by the same name (e.g., oo instead of S.Infinity).

    # As a general rule, things should only be added to the singleton registry
    # if they are used often enough that code can benefit either from the
    # performance benefit of being able to use 'is' (this only matters in very
    # tight loops), or from the memory savings of having exactly one instance
    # (this matters for the numbers singletons, but very little else). The
    # singleton registry is already a bit overpopulated, and things cannot be
    # removed from it without breaking backwards compatibility. So if you got
    # here by adding something new to the singletons, ask yourself if it
    # really needs to be singletonized. Note that SymPy classes compare to one
    # another just fine, so Class() == Class() will give True even if each
    # Class() returns a new instance. Having unique instances is only
    # necessary for the above noted performance gains. It should not be needed
    # for any behavioral purposes.

    # If you determine that something really should be a singleton, it must be
    # accessible to sympify() without using 'S' (hence this test). Also, its
    # str printer should print a form that does not use S. This is because
    # sympify() disables attribute lookups by default for safety purposes.
    d = {}
    exec('from sympy import *', d)

    for name in dir(S) + list(S._classes_to_install):
        if name.startswith('_'):
            continue
        if name == 'register':
            continue
        # Rational singletons (Zero, One, Half, ...) are reachable as plain
        # numbers, not necessarily by name.
        if isinstance(getattr(S, name), Rational):
            continue
        if getattr(S, name).__module__.startswith('sympy.physics'):
            continue
        if name in ['MySingleton', 'MySingleton_sub', 'TestSingleton']:
            # From the tests above
            continue
        if name == 'NegativeInfinity':
            # Accessible by -oo
            continue

        # Use is here to ensure it is the exact same object
        assert any(getattr(S, name) is i for i in d.values()), name
/**
 * Update procedure from the view model in order to get new information
 * without the need to manually refresh.
 */
@Override
public void update() {
	HashSet<Student> selectedStudents = studentMaterialSelector
			.getCurrentlySelectedStudents();
	HashSet<BorrowedMaterial> selectedMaterials = studentMaterialSelector
			.getCurrentlySelectedBorrowedMaterials();

	// Saving requires at least one selected material.
	buttonSaveSelectedData.setEnabled(!selectedMaterials.isEmpty());

	// Manual return is only offered for at most one selected student.
	buttonManualReturn.setEnabled(selectedStudents.size() <= 1);

	// The student list needs at least one selected student.
	buttonStudentList.setEnabled(!selectedStudents.isEmpty());
}
//
//  PDLLayerDebuggerWindow.h
//  Poodle
//
//  Created by Poodle on 15/10/2016.
//  Copyright © 2016 Poodle. All rights reserved.
//

#import "PDLScreenDebuggerWindow.h"

/// Screen-debugger window specialised for inspecting a single CALayer.
@interface PDLLayerDebuggerWindow : PDLScreenDebuggerWindow

/// The layer currently under inspection. Held weakly so the debugger
/// window never extends the lifetime of the layer it inspects.
@property (nonatomic, weak) CALayer *debuggingLayer;

@end
/* BEGIN_ICS_COPYRIGHT5 **************************************** Copyright (c) 2015-2017, Intel Corporation Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
** END_ICS_COPYRIGHT5 ****************************************/ /* [ICS VERSION STRING: unknown] */ /******************************************************************************* * * INCLUDES */ #include <stdio.h> #include "byteswap.h" #include "opamgt_priv.h" #include "opasadb_route2.h" #include "statustext.h" /******************************************************************************* * * DEFINES */ // Port Handle Table Entry: table of entries, 1 for each created port handle // Note that this struct must align with op_route_param_alloc_entry in // opasadb_route2.h struct param_alloc_port_handle_entry { uint64_t reserved; // Not used uint64_t reserved2; // Not used uint64_t reserved3; // Not used OP_ROUTE_PORT_HANDLE port_handle; // port_handle passed to user uint64_t port_guid; // Port GUID associated w/port_handle int port_id; // Port ID associated w/port_handle }; /******************************************************************************* * * LOCAL VARIABLES */ // Port handle table static struct op_route_param_alloc param_port_handle = { 0, 0, 0, 0, NULL }; // Send/recv timeout mSec (start value 4294 mSec based on RespTimeValue = 19) static int num_timeout_ms = (2 * 4096 * (1L << 19)) / 1000000; static int ct_retry = DEFAULT_SD_RETRY_COUNT; // Send/recv retry count /******************************************************************************* * * LOCAL FUNCTIONS */ /******************************************************************************* * * op_route_hton() * * Description: * Copy host data to network buffer, placing the most significant byte of * interest of host data in the first byte of the network buffer. The * copy is done a byte at a time so that the endian-ness of the CPU and * word/long-word boundaries in the network buffer do not matter. The * least significant 'length' bytes of host data are copied. The pointer * to the network buffer is updated to point to the next available network * data location. 
* * Inputs: * pp_bfr_net - Pointer to pointer to network buffer * host - Host data to copy * length - Length of data (in bytes) to copy (max 8) * * Outputs: * *pp_bfr_net - Next available network data location */ static void op_route_hton( uint8_t ** pp_bfr_net, uint64_t host, int length ) { int ix; uint8_t * p_bfr_net; if ( pp_bfr_net && (p_bfr_net = *pp_bfr_net) && (length > 0) && (length <= 8) ) { for (ix = length, p_bfr_net += (length - 1); ix > 0; host >>= 8, ix--) *p_bfr_net-- = host & 0xFF; *pp_bfr_net += length; } } // End of op_route_hton() /******************************************************************************* * * op_route_ntoh() * * Description: * Copy network buffer to host data, placing the first byte of the network * buffer in the most significant byte of interest of host data. The * copy is done a byte at a time so that the endian-ness of the CPU and * word/long-word boundaries in the network buffer do not matter. 'length' * bytes of the network buffer are copied to the least significant 'length' * bytes of host data, which is returned to the caller. The pointer to * the network buffer is updated to point to the next available network * data location. * * Inputs: * pp_bfr_net - Pointer to pointer to network buffer * length - Length of data (in bytes) to copy (max 8) * * Outputs: * host data - Network data available * *pp_bfr_net = next available network data location * 0 - No network data available) */ static uint64_t op_route_ntoh( uint8_t ** pp_bfr_net, int length ) { int ix; uint8_t * p_bfr_net; uint64_t host = 0; if ( pp_bfr_net && (p_bfr_net = *pp_bfr_net) && (length > 0) && (length <= 8) ) { for (ix = length; ix > 0; ix--) host = (host << 8) | *p_bfr_net++; *pp_bfr_net += length; } return (host); } // End of op_route_ntoh() /******************************************************************************* * * op_route_gen_port_handle() * * Description: * Generate a pseudo-random port handle. 
Use multiple calls to rand() if * an int is smaller than OP_ROUTE_PORT_HANDLE or to prevent a port handle * of 0. * * Inputs: * none * * Outputs: * OP_ROUTE_PORT_HANDLE */ static OP_ROUTE_PORT_HANDLE op_route_gen_port_handle(void) { static unsigned seed_rand = 0; OP_ROUTE_PORT_HANDLE port_handle; int fb_loop; if (!seed_rand) { seed_rand = (unsigned)time(NULL); srand(seed_rand); } fb_loop = (sizeof(int) < sizeof(OP_ROUTE_PORT_HANDLE)); for ( port_handle = (OP_ROUTE_PORT_HANDLE)rand(); fb_loop || !port_handle; fb_loop = FALSE ) port_handle = ( (port_handle << (sizeof(int) * 8)) | (OP_ROUTE_PORT_HANDLE)rand() ); return (port_handle); } // End of op_route_gen_port_handle() /******************************************************************************* * * op_route_get_port_handle_entry() * * Description: * Get the port table entry associated with the specified port_handle. * * Inputs: * port_handle - OP_ROUTE_PORT_HANDLE * * Outputs: * Pointer to port table entry, or NULL if entry not found */ static struct param_alloc_port_handle_entry * op_route_get_port_handle_entry( OP_ROUTE_PORT_HANDLE port_handle ) { uint32_t ix; struct param_alloc_port_handle_entry * p_param_entry; if (port_handle) for ( ix = param_port_handle.num_allocated, p_param_entry = (struct param_alloc_port_handle_entry *)param_port_handle.p_params; ix > 0; p_param_entry++, ix-- ) if (p_param_entry->port_handle == port_handle) return (p_param_entry); return (NULL); } // End of op_route_get_port_handle_entry() /******************************************************************************* * * op_route_send_recv_query() * * Description: * Create an OP_ROUTE query, send it and get the response. 
The following * queries are supported: * OP_ROUTE_AMOD_CLASS_INFO * OP_ROUTE_AMOD_CREATE_JOB * OP_ROUTE_AMOD_SET_USE_MATRIX * OP_ROUTE_AMOD_POLL_READY * OP_ROUTE_AMOD_COMPLETE_JOB * OP_ROUTE_AMOD_GET_PORTGUID_VEC * OP_ROUTE_AMOD_GET_SWITCH_MAP * OP_ROUTE_AMOD_GET_COST_MATRIX * OP_ROUTE_AMOD_GET_USE_MATRIX * OP_ROUTE_AMOD_GET_JOBS * * Inputs: * attr_mod - Attribute modifier * port_handle - OP_ROUTE_PORT_HANDLE * optn_query - Query options (AMOD specific) * p_job_id - Pointer to OP_ROUTE_JOB_ID * p_job_status - Pointer to job_status * p_job_params - Pointer to job parameters * p_guid_vec - Pointer to portguid_vec * p_switch_map - Pointer to switch_map * pp_cost_matrix - Pointer to pointer to cost_matrix * p_use_matrix - Pointer to use_matrix * p_job_list - Pointer to job_list * port - Pointer to opamgt handler * * Outputs: * OP_ROUTE_STATUS_OK - Query successful * OP_ROUTE_STATUS_INVALID_PARAM - port_handle invalid * - p_job_status NULL * - p_guid_vec NULL * - p_switch_map NULL * - pp_cost_matrix NULL * - p_use_matrix NULL * OP_ROUTE_STATUS_ERROR - Registration error * - Unable to allocate memory */ static enum op_route_status op_route_send_recv_query( uint32_t attr_mod, OP_ROUTE_PORT_HANDLE port_handle, uint16_t optn_query, OP_ROUTE_JOB_ID * p_job_id, enum op_route_job_status * p_job_status, struct op_route_job_parameters * p_job_params, struct op_route_portguid_vec * p_guid_vec, struct op_route_switch_map * p_switch_map, uint16_t ** pp_cost_matrix, struct op_route_use_matrix * p_use_matrix, struct op_route_job_list * p_job_list, struct omgt_port * port) { FSTATUS fstatus; enum op_route_status rstatus = OP_ROUTE_STATUS_OK; int ix, ix_2, ix_3; struct param_alloc_port_handle_entry * p_port_handle_entry = NULL; SA_MAD * p_mad_send = NULL; SA_MAD * p_mad_recv = NULL; uint8_t * p_data_wire; size_t len_data_alloc = sizeof(MAD_COMMON) + sizeof(RMPP_HEADER) + sizeof(SA_HDR); int len_recv; int qp_sm = 1; uint8_t base_version; uint8_t mgmt_class; uint8_t class_version; 
uint8_t class_method; MAD_STATUS mad_status; static uint32_t trans_id = 1; uint16_t attr_id; uint32_t attr_mod_2; uint8_t wstatus; OP_ROUTE_JOB_ID job_id; uint64_t * p_guids = NULL; uint64_t * p_guids_2; uint16_t * p_switch_indices = NULL; uint16_t * p_switch_indices_2; uint16_t * p_cost_matrix_2 = NULL; uint16_t * p_cost_matrix_l; uint16_t * p_cost_matrix_l2; uint16_t * p_cost_matrix_r; uint16_t * p_cost_matrix_r2; struct op_route_use_element * p_use_elements = NULL; struct op_route_use_element * p_use_elements_2; struct op_route_job_info * p_job_info = NULL; struct op_route_job_info * p_job_info_2; uint16_t num_guids=-1, num_guids_2=-1; uint16_t num_switches=-1, num_switches_2=-1; uint16_t num_elements=-1; uint16_t use_multiplier; uint8_t default_use; uint16_t num_jobs; uint64_t temp; struct omgt_mad_addr addr; uint8_t port_state; if (p_guid_vec) num_guids = p_guid_vec->num_guids; // Common processing if (!(p_port_handle_entry = op_route_get_port_handle_entry(port_handle))) return (OP_ROUTE_STATUS_INVALID_PARAM); // Ensure the port is active (void)omgt_port_get_port_state(port, &port_state); if (port_state != PortStateActive) return (OP_ROUTE_STATUS_ERROR); // Construct query based on attribute modifier switch (attr_mod) { case OP_ROUTE_AMOD_CLASS_INFO: case OP_ROUTE_AMOD_GET_JOBS: { if(!(p_mad_send = calloc(1, len_data_alloc))) { rstatus = OP_ROUTE_STATUS_ERROR; goto cleanup; } break; } // End of case OP_ROUTE_AMOD_CLASS_INFO / etc case OP_ROUTE_AMOD_CREATE_JOB: { len_data_alloc += ( sizeof(uint16_t) + OP_ROUTE_MAX_JOB_NAME_LEN + OP_ROUTE_MAX_APP_NAME_LEN + sizeof(uint64_t) + sizeof(uint64_t) + sizeof(uint16_t) + (num_guids * sizeof(uint64_t)) ); if(!(p_mad_send = calloc(1, len_data_alloc))) { rstatus = OP_ROUTE_STATUS_ERROR; goto cleanup; } p_data_wire = (uint8_t *)(p_mad_send->Data); op_route_hton(&p_data_wire, optn_query, sizeof(uint16_t)); strcpy((char *)p_data_wire, p_job_params->name); p_data_wire += OP_ROUTE_MAX_JOB_NAME_LEN; strcpy((char *)p_data_wire, 
p_job_params->application_name); p_data_wire += OP_ROUTE_MAX_APP_NAME_LEN; op_route_hton(&p_data_wire, p_job_params->pid, sizeof(p_job_params->pid)); op_route_hton(&p_data_wire, p_job_params->uid, sizeof(p_job_params->uid)); op_route_hton(&p_data_wire, num_guids, sizeof(uint16_t)); for (ix = 0; ix < num_guids; ix++) op_route_hton( &p_data_wire, p_guid_vec->p_guids[ix], sizeof(uint64_t) ); break; } // End of case OP_ROUTE_AMOD_CREATE_JOB case OP_ROUTE_AMOD_SET_USE_MATRIX: { len_data_alloc += ( sizeof(OP_ROUTE_JOB_ID) + sizeof(uint16_t) + sizeof(uint8_t) + sizeof(uint16_t) + ( p_use_matrix->num_elements * (sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint8_t)) ) ); if(!(p_mad_send = calloc(1, len_data_alloc))) { rstatus = OP_ROUTE_STATUS_ERROR; goto cleanup; } p_data_wire = (uint8_t *)(p_mad_send->Data); op_route_hton(&p_data_wire, *p_job_id, sizeof(OP_ROUTE_JOB_ID)); op_route_hton(&p_data_wire, p_use_matrix->multiplier, sizeof(uint16_t)); *p_data_wire++ = p_use_matrix->default_use; op_route_hton(&p_data_wire, p_use_matrix->num_elements, sizeof(uint16_t)); for ( ix = p_use_matrix->num_elements, p_use_elements = p_use_matrix->p_elements; ix > 0; p_use_elements++, ix-- ) { op_route_hton( &p_data_wire, (p_use_elements->bursty << 15) | p_use_elements->switch_index, sizeof(uint16_t) ); op_route_hton(&p_data_wire, p_use_elements->dlid, sizeof(uint16_t)); *p_data_wire++ = p_use_elements->use; } break; } // End of case OP_ROUTE_AMOD_SET_USE_MATRIX case OP_ROUTE_AMOD_POLL_READY: case OP_ROUTE_AMOD_COMPLETE_JOB: case OP_ROUTE_AMOD_GET_PORTGUID_VEC: case OP_ROUTE_AMOD_GET_SWITCH_MAP: case OP_ROUTE_AMOD_GET_COST_MATRIX: case OP_ROUTE_AMOD_GET_USE_MATRIX: { len_data_alloc += sizeof(OP_ROUTE_JOB_ID); if(!(p_mad_send = calloc(1, len_data_alloc))) { rstatus = OP_ROUTE_STATUS_ERROR; goto cleanup; } p_data_wire = (uint8_t *)(p_mad_send->Data); op_route_hton(&p_data_wire, *p_job_id, sizeof(OP_ROUTE_JOB_ID)); break; } // End of case OP_ROUTE_AMOD_POLL_READY / etc // Invalid attribute 
modifier default: printf("op_route_send_recv_query: invalid AMOD:%d\n", attr_mod); rstatus = OP_ROUTE_STATUS_ERROR; goto cleanup; } // End of switch (attr_mod) // Prep query for send; send query and receive response MAD_SET_ATTRIB_MOD(p_mad_send, attr_mod); MAD_SET_METHOD_TYPE(p_mad_send, SUBN_ADM_GETMULTI); MAD_SET_VERSION_INFO( p_mad_send, IB_BASE_VERSION, MCLASS_SUBN_ADM, IB_COMM_MGT_CLASS_VERSION ); MAD_SET_TRANSACTION_ID(p_mad_send, trans_id++); MAD_SET_ATTRIB_ID(p_mad_send, SA_ATTRIB_JOB_ROUTE_RECORD); if (len_data_alloc > MAD_BLOCK_SIZE) p_mad_send->RmppHdr.RmppFlags.s.Active = 1; BSWAP_MAD_HEADER((MAD *)p_mad_send); BSWAP_SA_HDR(&p_mad_send->SaHdr); // Send query memset(&addr, 0, sizeof(addr)); (void)omgt_port_get_port_sm_lid(port, &addr.lid); addr.qpn = qp_sm; addr.qkey = QP1_WELL_KNOWN_Q_KEY; addr.pkey = OMGT_DEFAULT_PKEY; fstatus = omgt_send_recv_mad_alloc(port, (uint8_t *)p_mad_send, len_data_alloc, &addr, (uint8_t **)&p_mad_recv, (size_t *)&len_recv, num_timeout_ms, ct_retry); if (fstatus != FSUCCESS || !p_mad_recv) { if (fstatus == FTIMEOUT || fstatus == FNOT_DONE) { rstatus = OP_ROUTE_STATUS_TIMEOUT; goto cleanup; } else { rstatus = OP_ROUTE_STATUS_RECV_ERROR; goto cleanup; } } // Fix endian of the received data & check MAD header if ((len_recv -= IBA_SUBN_ADM_HDRSIZE) < 0) { rstatus = OP_ROUTE_STATUS_ERROR; goto cleanup; } BSWAP_MAD_HEADER((MAD *)(p_mad_recv)); BSWAP_SA_HDR(&p_mad_recv->SaHdr); MAD_GET_VERSION_INFO(p_mad_recv, &base_version, &mgmt_class, &class_version); MAD_GET_METHOD_TYPE(p_mad_recv, &class_method); MAD_GET_STATUS(p_mad_recv, &mad_status); MAD_GET_ATTRIB_ID(p_mad_recv, &attr_id); MAD_GET_ATTRIB_MOD(p_mad_recv, &attr_mod_2); if ( (base_version < OP_ROUTE_MIN_BASE_VERSION) || (mgmt_class != MCLASS_SUBN_ADM) || (class_version < OP_ROUTE_MIN_SA_CLASS_VERSION) || (class_method != SUBN_ADM_GETMULTI) || mad_status.AsReg16 || (attr_id != SA_ATTRIB_JOB_ROUTE_RECORD) || (attr_mod_2 != attr_mod) ) { rstatus = OP_ROUTE_STATUS_ERROR; goto 
cleanup; } // Get wire status in recv data & process common errors if ((len_recv -= 1) < 0) { rstatus = OP_ROUTE_STATUS_ERROR; goto cleanup; } p_data_wire = (uint8_t *)(p_mad_recv->Data); wstatus = (enum op_route_wire_status) *p_data_wire++; if (wstatus == OP_ROUTE_WIRE_STATUS_INVALID_JOB) { rstatus = OP_ROUTE_STATUS_INVALID_JOB; goto cleanup; } if (wstatus == OP_ROUTE_WIRE_STATUS_ERROR) { rstatus = OP_ROUTE_STATUS_ERROR; goto cleanup; } // Process recv data based on attribute modifier switch (attr_mod) { case OP_ROUTE_AMOD_CLASS_INFO: { // Check wire status if (wstatus == OP_ROUTE_WIRE_STATUS_OK_PARTIAL) { rstatus = OP_ROUTE_STATUS_ERROR; goto cleanup; } // Check for minimum amount of received data if ((len_recv -= 16) < 0) { rstatus = OP_ROUTE_STATUS_ERROR; goto cleanup; } // Check JM class version p_data_wire += 1; if ( (temp = op_route_ntoh(&p_data_wire, sizeof(uint8_t))) < OP_ROUTE_MIN_JM_CLASS_VERSION ) { rstatus = OP_ROUTE_STATUS_ERROR; goto cleanup; } // Get RespTimeValue and calculate num_timeout_ms; instead of doubling // RespTimeValue-based value, could add PortInfo:SubnetTimeout p_data_wire += 5; num_timeout_ms = ( 2 * 4096 * (1L << (op_route_ntoh(&p_data_wire, sizeof(uint8_t)) & 0x1F)) ) / 1000000; // Verify redirection GID is 0 if ((temp = op_route_ntoh(&p_data_wire, sizeof(uint64_t)))) { rstatus = OP_ROUTE_STATUS_ERROR; goto cleanup; } // Ignore remaining received data len_recv = 0; break; } // End of case OP_ROUTE_AMOD_CLASS_INFO case OP_ROUTE_AMOD_CREATE_JOB: { // Check wire status if (wstatus == OP_ROUTE_WIRE_STATUS_OK_PARTIAL) rstatus = OP_ROUTE_STATUS_OK_PARTIAL; // Check for minimum amount of received data if ( ( len_recv -= ( sizeof(OP_ROUTE_JOB_ID) + ((num_guids + 2) * sizeof(uint16_t)) ) ) < 0 ) { rstatus = OP_ROUTE_STATUS_ERROR; goto cleanup; } // Get job ID job_id = op_route_ntoh(&p_data_wire, sizeof(OP_ROUTE_JOB_ID)); // Get switch map if ((len_data_alloc = num_guids * sizeof(uint16_t))) { if (!(p_switch_indices = calloc(1, 
len_data_alloc))) { rstatus = OP_ROUTE_STATUS_ERROR; goto cleanup; } num_guids_2 = op_route_ntoh(&p_data_wire, sizeof(uint16_t)); num_switches = op_route_ntoh(&p_data_wire, sizeof(uint16_t)); if ( (num_guids_2 != num_guids) || (num_switches > OP_ROUTE_MAX_NUM_SWITCHES) || (num_switches > num_guids) ) { rstatus = OP_ROUTE_STATUS_ERROR; goto cleanup; } for ( ix = num_guids, p_switch_indices_2 = p_switch_indices; ix > 0; p_switch_indices_2++, ix-- ) { if ( ( ( *p_switch_indices_2 = op_route_ntoh(&p_data_wire, sizeof(uint16_t)) ) >= num_switches ) && (*p_switch_indices_2 != 0xFFFF) ) { rstatus = OP_ROUTE_STATUS_ERROR; goto cleanup; } } } // Check for minimum amount of received data if ((len_recv -= sizeof(uint16_t)) < 0) { rstatus = OP_ROUTE_STATUS_ERROR; goto cleanup; } // Get cost matrix; comes in as 'top right' half num_switches_2 = op_route_ntoh(&p_data_wire, sizeof(uint16_t)); if (num_switches_2 != num_switches) { rstatus = OP_ROUTE_STATUS_ERROR; goto cleanup; } if ((len_data_alloc = num_switches * num_switches * sizeof(uint16_t))) { // Check amount of received data if ( ( len_recv -= ( (num_switches * (num_switches - 1)) * sizeof(uint16_t) / 2 ) ) < 0 ) { rstatus = OP_ROUTE_STATUS_ERROR; goto cleanup; } if (!(p_cost_matrix_2 = calloc(1, len_data_alloc))) { rstatus = OP_ROUTE_STATUS_ERROR; goto cleanup; } for ( ix = num_switches * (num_switches - 1) / 2, ix_2 = num_switches - 1, p_cost_matrix_r = p_cost_matrix_2 + 1, p_cost_matrix_l = p_cost_matrix_2 + num_switches; ix > 0; p_cost_matrix_r += (num_switches + 1), p_cost_matrix_l += (num_switches + 1), ix_2-- ) for ( ix_3 = ix_2, p_cost_matrix_r2 = p_cost_matrix_r, p_cost_matrix_l2 = p_cost_matrix_l; (ix_3 > 0) && (ix > 0); p_cost_matrix_r2++, p_cost_matrix_l2 += num_switches, ix_3--, ix-- ) *p_cost_matrix_r2 = *p_cost_matrix_l2 = (uint16_t)op_route_ntoh(&p_data_wire, sizeof(uint16_t)); } // Return data to caller if (!(optn_query & OP_ROUTE_CREATE_JOB_NO_CREATE)) *p_job_id = job_id; p_switch_map->num_switches = 
num_switches; p_switch_map->p_switch_indices = p_switch_indices; *pp_cost_matrix = p_cost_matrix_2; p_switch_indices = NULL; p_cost_matrix_2 = NULL; break; } // End of case OP_ROUTE_AMOD_CREATE_JOB case OP_ROUTE_AMOD_SET_USE_MATRIX: { // Check wire status if (wstatus == OP_ROUTE_WIRE_STATUS_OK_PARTIAL) { rstatus = OP_ROUTE_STATUS_ERROR; goto cleanup; } break; } // End of case OP_ROUTE_AMOD_SET_USE_MATRIX case OP_ROUTE_AMOD_POLL_READY: { // Check wire status if (wstatus == OP_ROUTE_WIRE_STATUS_OK_PARTIAL) { rstatus = OP_ROUTE_STATUS_ERROR; goto cleanup; } // Get job_status if ((len_recv -= 1) < 0) { rstatus = OP_ROUTE_STATUS_ERROR; goto cleanup; } *p_job_status = (enum op_route_job_status) *p_data_wire; break; } // End of case OP_ROUTE_AMOD_POLL_READY case OP_ROUTE_AMOD_COMPLETE_JOB: { // Check wire status if (wstatus == OP_ROUTE_WIRE_STATUS_OK_PARTIAL) { rstatus = OP_ROUTE_STATUS_ERROR; goto cleanup; } break; } // End of case OP_ROUTE_AMOD_COMPLETE_JOB case OP_ROUTE_AMOD_GET_PORTGUID_VEC: { if (! 
p_guid_vec) { rstatus = OP_ROUTE_STATUS_INVALID_PARAM; goto cleanup; } // Check wire status if (wstatus == OP_ROUTE_WIRE_STATUS_OK_PARTIAL) { rstatus = OP_ROUTE_STATUS_ERROR; goto cleanup; } // Check for minimum amount of received data if ((len_recv -= sizeof(uint16_t)) < 0) { rstatus = OP_ROUTE_STATUS_ERROR; goto cleanup; } // Get number of port GUIDs num_guids = op_route_ntoh(&p_data_wire, sizeof(uint16_t)); // Check for minimum amount of received data len_data_alloc = num_guids * sizeof(uint64_t); if ((len_recv -= len_data_alloc) < 0) { rstatus = OP_ROUTE_STATUS_ERROR; goto cleanup; } if (len_data_alloc) { if (!(p_guids = calloc(1, len_data_alloc))) { rstatus = OP_ROUTE_STATUS_ERROR; goto cleanup; } // Get port GUIDs for (ix = num_guids, p_guids_2 = p_guids; ix > 0; ix--) *p_guids_2++ = op_route_ntoh(&p_data_wire, sizeof(uint64_t)); } // Return data to caller p_guid_vec->num_guids = num_guids; p_guid_vec->p_guids = p_guids; break; } // End of case OP_ROUTE_AMOD_GET_PORTGUID_VEC case OP_ROUTE_AMOD_GET_SWITCH_MAP: { // Check wire status if (wstatus == OP_ROUTE_WIRE_STATUS_OK_PARTIAL) { rstatus = OP_ROUTE_STATUS_ERROR; goto cleanup; } // Check for minimum amount of received data if ((len_recv -= (sizeof(uint16_t) + sizeof(uint16_t))) < 0) { rstatus = OP_ROUTE_STATUS_ERROR; goto cleanup; } // Get num_guids and num_switches num_guids = op_route_ntoh(&p_data_wire, sizeof(uint16_t)); num_switches = op_route_ntoh(&p_data_wire, sizeof(uint16_t)); if ( (num_switches > OP_ROUTE_MAX_NUM_SWITCHES) || (num_switches > num_guids) ) { rstatus = OP_ROUTE_STATUS_ERROR; goto cleanup; } // Check for minimum amount of received data len_data_alloc = num_guids * sizeof(uint16_t); if ((len_recv -= len_data_alloc ) < 0) { rstatus = OP_ROUTE_STATUS_ERROR; goto cleanup; } if (len_data_alloc) { if (!(p_switch_indices = calloc(1, len_data_alloc))) { rstatus = OP_ROUTE_STATUS_ERROR; goto cleanup; } // Get switch map for ( ix = num_guids, p_switch_indices_2 = p_switch_indices; ix > 0; 
p_switch_indices_2++, ix-- ) { if ( ( ( *p_switch_indices_2 = op_route_ntoh(&p_data_wire, sizeof(uint16_t)) ) >= num_switches ) && (*p_switch_indices_2 != 0xFFFF) ) { rstatus = OP_ROUTE_STATUS_ERROR; goto cleanup; } } } // Return data to caller p_switch_map->num_switches = num_switches; p_switch_map->p_switch_indices = p_switch_indices; p_switch_indices = NULL; break; } // End of case OP_ROUTE_AMOD_GET_SWITCH_MAP case OP_ROUTE_AMOD_GET_COST_MATRIX: { // Check wire status if (wstatus == OP_ROUTE_WIRE_STATUS_OK_PARTIAL) { rstatus = OP_ROUTE_STATUS_ERROR; goto cleanup; } // Check for minimum amount of received data if ((len_recv -= sizeof(uint16_t)) < 0 ) { rstatus = OP_ROUTE_STATUS_ERROR; goto cleanup; } num_switches = op_route_ntoh(&p_data_wire, sizeof(uint16_t)); if ( ( len_recv -= ( (num_switches * (num_switches - 1)) * sizeof(uint16_t) / 2 ) ) < 0 ) { rstatus = OP_ROUTE_STATUS_ERROR; goto cleanup; } len_data_alloc = num_switches * num_switches * sizeof(uint16_t); if (len_data_alloc) { if (!(p_cost_matrix_2 = calloc(1, len_data_alloc))) { rstatus = OP_ROUTE_STATUS_ERROR; goto cleanup; } // Get cost_matrix for ( ix = num_switches * (num_switches - 1) / 2, ix_2 = num_switches - 1, p_cost_matrix_r = p_cost_matrix_2 + 1, p_cost_matrix_l = p_cost_matrix_2 + num_switches; ix > 0; p_cost_matrix_r += (num_switches + 1), p_cost_matrix_l += (num_switches + 1), ix_2-- ) for ( ix_3 = ix_2, p_cost_matrix_r2 = p_cost_matrix_r, p_cost_matrix_l2 = p_cost_matrix_l; (ix_3 > 0) && (ix > 0); p_cost_matrix_r2++, p_cost_matrix_l2 += num_switches, ix_3--, ix-- ) *p_cost_matrix_r2 = *p_cost_matrix_l2 = (uint16_t)op_route_ntoh(&p_data_wire, sizeof(uint16_t)); } // Return data to caller *pp_cost_matrix = p_cost_matrix_2; p_cost_matrix_2 = NULL; break; } // End of case OP_ROUTE_AMOD_GET_COST_MATRIX case OP_ROUTE_AMOD_GET_USE_MATRIX: { // Check wire status if (wstatus == OP_ROUTE_WIRE_STATUS_OK_PARTIAL) { rstatus = OP_ROUTE_STATUS_ERROR; goto cleanup; } // Check for minimum amount of 
received data if ( ( len_recv -= ( sizeof(uint16_t) + sizeof(uint8_t) + sizeof(uint16_t) ) ) < 0 ) { rstatus = OP_ROUTE_STATUS_ERROR; goto cleanup; } // Get multiplier, default use, and number of use elements use_multiplier = op_route_ntoh(&p_data_wire, sizeof(uint16_t)); default_use = *p_data_wire++; num_elements = op_route_ntoh(&p_data_wire, sizeof(uint16_t)); // Check for minimum amount of received data if ( ( len_recv -= ( num_elements * ( sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint8_t) ) ) ) < 0 ) { rstatus = OP_ROUTE_STATUS_ERROR; goto cleanup; } len_data_alloc = num_elements * sizeof(struct op_route_use_element); if (len_data_alloc) { if (!(p_use_elements = calloc(1, len_data_alloc))) { rstatus = OP_ROUTE_STATUS_ERROR; goto cleanup; } // Get use elements for ( ix = num_elements, p_use_elements_2 = p_use_elements; ix > 0; p_use_elements_2++, ix-- ) { temp = op_route_ntoh(&p_data_wire, sizeof(uint16_t)); p_use_elements_2->bursty = (temp >> 15) & 0x0001; p_use_elements_2->switch_index = temp & 0x7FFF; p_use_elements_2->dlid = op_route_ntoh(&p_data_wire, sizeof(uint16_t)); p_use_elements_2->use = *p_data_wire++; } } // Return data to caller p_use_matrix->default_use = default_use; p_use_matrix->multiplier = use_multiplier; p_use_matrix->num_elements = num_elements; p_use_matrix->p_elements = p_use_elements; break; } // End of case OP_ROUTE_AMOD_GET_USE_MATRIX case OP_ROUTE_AMOD_GET_JOBS: { // Check wire status if (wstatus == OP_ROUTE_WIRE_STATUS_OK_PARTIAL) { rstatus = OP_ROUTE_STATUS_ERROR; goto cleanup; } // Check for minimum amount of received data if ((len_recv -= sizeof(uint16_t)) < 0) { rstatus = OP_ROUTE_STATUS_ERROR; goto cleanup; } // Get number of jobs num_jobs = op_route_ntoh(&p_data_wire, sizeof(uint16_t)); // Check for minimum amount of received data if ( ( len_recv -= ( num_jobs * ( sizeof(OP_ROUTE_JOB_ID) + sizeof(uint64_t) + sizeof(uint16_t) + OP_ROUTE_MAX_JOB_NAME_LEN + OP_ROUTE_MAX_APP_NAME_LEN + sizeof(uint64_t) + sizeof(uint64_t) ) ) ) 
< 0 ) { rstatus = OP_ROUTE_STATUS_ERROR; goto cleanup; } if ((len_data_alloc = num_jobs * sizeof(struct op_route_job_info))) { if (!(p_job_info = calloc(1, len_data_alloc))) { rstatus = OP_ROUTE_STATUS_ERROR; goto cleanup; } // Get list of job info for ( ix = num_jobs, p_job_info_2 = p_job_info; ix > 0; p_job_info_2++, ix-- ) { p_job_info_2->job_id = op_route_ntoh(&p_data_wire, sizeof(OP_ROUTE_JOB_ID)); temp = op_route_ntoh(&p_data_wire, sizeof(uint64_t)); p_job_info_2->time_stamp = temp; temp = op_route_ntoh(&p_data_wire, sizeof(uint16_t)); p_job_info_2->routed = (temp >> 1) & 0x0001; p_job_info_2->has_use = temp & 0x0001; strncpy( p_job_info_2->params.name, (char *)p_data_wire, OP_ROUTE_MAX_JOB_NAME_LEN ); p_data_wire += OP_ROUTE_MAX_JOB_NAME_LEN; strncpy( p_job_info_2->params.application_name, (char *)p_data_wire, OP_ROUTE_MAX_APP_NAME_LEN ); p_data_wire += OP_ROUTE_MAX_APP_NAME_LEN; p_job_info_2->params.pid = op_route_ntoh(&p_data_wire, sizeof(uint64_t)); p_job_info_2->params.uid = op_route_ntoh(&p_data_wire, sizeof(uint64_t)); } } // Return data to caller p_job_list->num_jobs = num_jobs; p_job_list->p_job_info = p_job_info; break; } // End of case OP_ROUTE_AMOD_GET_JOBS // Invalid attribute modifier default: printf("op_route_send_recv_query: invalid AMOD:%d\n", attr_mod); rstatus = OP_ROUTE_STATUS_ERROR; } // End of switch (attr_mod) cleanup: // Deallocate local buffers if necessary if (p_mad_send) free(p_mad_send); if (p_mad_recv) free(p_mad_recv); if (p_switch_indices) free(p_switch_indices); if (p_cost_matrix_2) free(p_cost_matrix_2); return (rstatus); } // End of op_route_send_recv_query() /******************************************************************************* * * op_route_dump_job_info() * * Description: dump job_info structure to console. 
* * Inputs: * p_title - Pointer to dump title string * n_indent - Number of spaces to indent * p_job_info - Pointer to job_info * * Outputs: * job_info data */ static void op_route_dump_job_info( char * p_title, int n_indent, struct op_route_job_info * p_job_info ) { if (p_job_info) { printf( "%*sjob_info(%s): job_id:0x%"PRIX64"\n", n_indent, "", p_title, p_job_info->job_id ); printf( "%*s time_stamp:%"PRIu64" %s", n_indent, "", (uint64_t)p_job_info->time_stamp, ctime(&p_job_info->time_stamp) ); printf( "%*s route:%u use:%u\n", n_indent, "", p_job_info->routed, p_job_info->has_use ); printf( "%*s name:(%s) app:(%s)\n", n_indent, "", p_job_info->params.name, p_job_info->params.application_name ); printf( "%*s pid:0x%016"PRIX64" uid:0x%016"PRIX64"\n", n_indent, "", p_job_info->params.pid, p_job_info->params.uid ); } } // End of op_route_dump_job_info() /******************************************************************************* * * op_route_dump_job_list() * * Description: dump job_list to console. * * Inputs: * p_title - Pointer to dump title string * n_indent - Number of spaces to indent * p_job_list - Pointer to job_list * * Outputs: * job_list data */ static void op_route_dump_job_list( char * p_title, int n_indent, struct op_route_job_list * p_job_list ) { int ix; struct op_route_job_info * p_job_info; char bf_title2[81]; if (p_job_list) { printf( "%*sjob list(%s): num_jobs: %u\n", n_indent, "", p_title, p_job_list->num_jobs ); for ( ix = 0, p_job_info = p_job_list->p_job_info; (ix < p_job_list->num_jobs) && p_job_info; p_job_info++, ix++ ) { sprintf(bf_title2, "%d", ix); op_route_dump_job_info(bf_title2, n_indent+2, p_job_info); } } } // End of op_route_dump_job_list() /******************************************************************************* * * GLOBAL FUNCTIONS */ /******************************************************************************* * * op_route_dump() * * Description: top-level dump function for op_route data structures to console. 
* This function calls specific dump functions needed based on calling * parameters. * * Inputs: * p_title - Pointer to dump title string * fb_port_handles - Flag to dump port handles table * p_port_handle - Pointer to OP_ROUTE_PORT_HANDLE * p_job_id - Pointer to OP_ROUTE_JOB_ID * p_job_params - Pointer to job parameters * p_guid_vec - Pointer to portguid_vec * p_switch_map - Pointer to switch_map * p_cost_matrix - Pointer to cost_matrix * p_use_matrix - Pointer to use_matrix * p_job_info - Pointer to job_info * p_job_list - Pointer to job_list * * Outputs: * job information */ void op_route_dump( char * p_title, int fb_port_handles, OP_ROUTE_PORT_HANDLE * p_port_handle, OP_ROUTE_JOB_ID * p_job_id, struct op_route_job_parameters * p_job_params, struct op_route_portguid_vec * p_guid_vec, struct op_route_switch_map * p_switch_map, uint16_t * p_cost_matrix, struct op_route_use_matrix * p_use_matrix, struct op_route_job_info * p_job_info, struct op_route_job_list * p_job_list ) { int ix, ix_2, ix_3; int n_indent = 2; uint16_t num_switches; struct param_alloc_port_handle_entry * p_param_entry; printf("op_route_dump (%s):\n", p_title); if (fb_port_handles) { printf( "%*sport handles: alloc:%u inuse: %u\n", n_indent, "", param_port_handle.num_allocated, param_port_handle.num_used ); for ( ix = 0, p_param_entry = (struct param_alloc_port_handle_entry *) param_port_handle.p_params; ix < param_port_handle.num_allocated; p_param_entry++, ix++ ) printf( "%*s%d: port_h:0x%"PRIX64" GUID:0x%"PRIX64 " port_id:%d\n", n_indent+2, "", ix, p_param_entry->port_handle, p_param_entry->port_guid, p_param_entry->port_id ); } printf("%*sp_porthandle:0x%"PRIX64, n_indent, "", (uint64_t)p_port_handle); if (p_port_handle) printf(" porthandle:0x%"PRIX64, (uint64_t)*p_port_handle); printf("\n"); if (p_job_id) printf("%*sjob_id:0x%"PRIX64"\n", n_indent, "", (uint64_t)*p_job_id); if (p_job_params) { printf( "%*sjob_parameters: name:(%s) app:(%s)\n", n_indent, "", p_job_params->name, 
p_job_params->application_name ); printf( "%*spid:0x%016"PRIX64" uid:0x%016"PRIX64"\n", n_indent+2, "", p_job_params->pid, p_job_params->uid ); } if (p_guid_vec) { printf("%*sguid_vec: num_guids:%u\n", n_indent, "", p_guid_vec->num_guids); for (ix = 0; (ix < p_guid_vec->num_guids) && p_guid_vec->p_guids; ix++) printf( "%*s%5d: 0x%016"PRIX64"\n", n_indent+2, "", ix, p_guid_vec->p_guids[ix] ); } if (p_switch_map && p_guid_vec) { printf( "%*sswitch_map: num_switches:%u (num_guids:%u)\n", n_indent, "", p_switch_map->num_switches, p_guid_vec->num_guids ); for ( ix = 0; (ix < p_guid_vec->num_guids) && p_switch_map->p_switch_indices; ix++ ) printf( "%*s%5d: %5u\n", n_indent+2, "", ix, p_switch_map->p_switch_indices[ix] ); } if ( p_cost_matrix && p_switch_map && ((num_switches = p_switch_map->num_switches)) ) { printf( "%*scost_matrix: (num_switches:%u)\n", n_indent, "", num_switches ); for (ix = 0; ix < num_switches; ix++) printf("%*s%4d", ix ? 1 : n_indent+6, "", ix); printf("\n"); for (ix = 0, ix_2 = 0; ix < num_switches; ix++) { printf("%*s%4d:", n_indent, "", ix); for (ix_3 = 0; ix_3 < num_switches; ix_3++) printf(" %04X", p_cost_matrix[ix_2++]); printf("\n"); } } if (p_use_matrix) { printf( "%*suse_matrix: num_elements:%u default_use:%u multiplier:%u\n", n_indent, "", p_use_matrix->num_elements, p_use_matrix->default_use, p_use_matrix->multiplier ); for ( ix = 0; (ix < p_use_matrix->num_elements) && p_use_matrix->p_elements; ix++ ) printf( "%*s%d: sw_index:%5d dlid:0x%04X use:%u bursty:%u\n", n_indent+2, "", ix, p_use_matrix->p_elements[ix].switch_index, p_use_matrix->p_elements[ix].dlid, p_use_matrix->p_elements[ix].use, p_use_matrix->p_elements[ix].bursty ); } op_route_dump_job_info(p_title, n_indent, p_job_info); op_route_dump_job_list(p_title, n_indent, p_job_list); } // End of op_route_dump() /******************************************************************************* * * op_route_init_param() * * Description: * Initialize the specified parameter structure. 
* * Inputs: * p_param - Pointer to parameter structure * size_param - Size of parameter entry * num_alloc - Num of params in allocation unit * * Outputs: * 0 - Initialization successful * -1 - Initialization unsuccessful */ int op_route_init_param( struct op_route_param_alloc * p_param, uint32_t size_param, uint32_t num_alloc ) { void * p_alloc; if (!p_param || !size_param || !num_alloc) return (-1); if ((p_alloc = calloc(num_alloc, size_param))) { p_param->size_param = size_param; p_param->num_allocated = num_alloc; p_param->num_used = 0; p_param->num_alloc = num_alloc; p_param->p_params = p_alloc; } else return (-1); return (0); } // End of op_route_init_param() /******************************************************************************* * * op_route_free_param() * * Description: * Free parameters for the specified parameter structure. * * Inputs: * p_param - Pointer to parameter structure * * Outputs: * none */ void op_route_free_param(struct op_route_param_alloc * p_param) { if (p_param && p_param->p_params) { free(p_param->p_params); p_param->num_allocated = 0; p_param->num_used = 0; p_param->p_params = NULL; } } // End of op_route_free_param() /******************************************************************************* * * op_route_alloc_param() * * Description: * Allocate a block of parameters according to specified parameter pointer. 
* * Inputs: * p_param - Pointer to parameter structure * * Outputs: * 0 - Allocation successful * -1 - Allocation unsuccessful */ int op_route_alloc_param(struct op_route_param_alloc * p_param) { char * p_alloc; if (!p_param) return (-1); if ( ( p_alloc = realloc( p_param->p_params, p_param->size_param * (p_param->num_allocated + p_param->num_alloc) ) ) ) { memset( p_alloc + (p_param->size_param * p_param->num_allocated), 0, p_param->size_param * p_param->num_alloc ); p_param->p_params = (struct op_route_param_alloc_entry *)p_alloc; p_param->num_allocated += p_param->num_alloc; } else return (-1); return (0); } // End of op_route_alloc_param() /******************************************************************************* * * op_route_dump_param() * * Description: dump information, as supplied by calling parameters, to console. * * Inputs: * p_title - Pointer to dump title string * p_param - Pointer to parameter structure * * Outputs: * Allocated parameter information */ void op_route_dump_param( char * p_title, struct op_route_param_alloc * p_param ) { int ix; struct op_route_param_alloc_entry * p_param_entry; printf("op_route_dump_param (%s):\n", p_title); if (p_param) { printf( " sz_param:%"PRIu64" allocd:%u used:%u alloc:%u p_params:0x%"PRIX64"\n", p_param->size_param, p_param->num_allocated, p_param->num_used, p_param->num_alloc, (uint64_t)p_param->p_params ); for (ix = 0; ix < p_param->num_allocated; ix++) { p_param_entry = (struct op_route_param_alloc_entry *) ((char *)p_param->p_params + (p_param->size_param * ix)); printf( " %4d: sz_param:%"PRIu64" sz_data:%"PRIu64" info_param:%" PRIu64" data64:0x%016"PRIX64"\n", ix, p_param_entry->size_param, p_param_entry->size_data, p_param_entry->info_param, *(uint64_t *)p_param_entry->bf_data ); } } } // End of op_route_dump_param() /******************************************************************************* * * op_route_get_status_text() * * Description: * Get a text string corresponding to the specified 
op_route_status value. * * Inputs: * status - Status value * * Outputs: * status string - Valid status value * NULL - Invalid status value */ extern char * op_route_get_status_text(enum op_route_status status) { switch(status) { case OP_ROUTE_STATUS_OK: return("No Error"); case OP_ROUTE_STATUS_OK_PARTIAL: return("Partial Success"); case OP_ROUTE_STATUS_ERROR: return("General Error"); case OP_ROUTE_STATUS_SEND_ERROR: return("Send Error"); case OP_ROUTE_STATUS_RECV_ERROR: return("Receive Error"); case OP_ROUTE_STATUS_TIMEOUT: return("Response Timeout"); case OP_ROUTE_STATUS_INVALID_JOB: return("Invalid Job ID"); case OP_ROUTE_STATUS_INVALID_PARAM: return("Invalid Parameter"); default: return(NULL); } // End of switch(status) } // End of op_route_get_status_text() /******************************************************************************* * * op_route_open() * * Description: * Open a routing connection by registering with SD. Each successful open * creates an OP_ROUTE_PORT_HANDLE, which is entered in the port table, * and passed back to the caller. 
* * Inputs: * port - Pointer to opamgt handler * port_guid - GUID of port on which to register * p_port_handle - Pointer to OP_ROUTE_PORT_HANDLE * * Outputs: * OP_ROUTE_STATUS_OK - Open successful * *p_port_handle = OP_ROUTE_PORT_HANDLE * OP_ROUTE_STATUS_INVALID_PARAM - p_port_handle NULL * OP_ROUTE_STATUS_ERROR - port_guid already registered * - Unable to allocate memory * - Register error */ enum op_route_status op_route_open( struct omgt_port * port, uint64_t port_guid, OP_ROUTE_PORT_HANDLE * p_port_handle ) { uint32_t ix, ix_2; int fb_break; struct param_alloc_port_handle_entry * p_param_entry, * p_param_entry_2; if (!p_port_handle) return (OP_ROUTE_STATUS_INVALID_PARAM); // Check for port_guid already in port table if (!param_port_handle.num_allocated) if ( op_route_init_param( &param_port_handle, sizeof(struct param_alloc_port_handle_entry), NUM_PARAM_PORT_HANDLE_ALLOC ) ) return (OP_ROUTE_STATUS_ERROR); for ( ix = param_port_handle.num_allocated, p_param_entry = (struct param_alloc_port_handle_entry *)param_port_handle.p_params; ix > 0; p_param_entry++, ix-- ) if (p_param_entry->port_guid == port_guid) return (OP_ROUTE_STATUS_ERROR); // Find first available port table entry and register if (param_port_handle.num_used == param_port_handle.num_allocated) if (op_route_alloc_param(&param_port_handle)) return (OP_ROUTE_STATUS_ERROR); for ( ix = param_port_handle.num_allocated, p_param_entry = (struct param_alloc_port_handle_entry *)param_port_handle.p_params; ix > 0; p_param_entry++, ix-- ) { if (!p_param_entry->port_handle) { // Get port_handle and check for duplicate for (fb_break = FALSE; !fb_break; ) { p_param_entry->port_handle = op_route_gen_port_handle(); for ( ix_2 = param_port_handle.num_allocated, p_param_entry_2 = (struct param_alloc_port_handle_entry *)param_port_handle.p_params; ; p_param_entry_2++ ) { if ( (ix_2 != ix) && ( p_param_entry->port_handle == p_param_entry_2->port_handle ) ) break; // Get another port_handle else if (--ix_2 == 0) { 
fb_break = TRUE; break; } } } p_param_entry->port_guid = port_guid; *p_port_handle = p_param_entry->port_handle; // Get ClassPortInfo if ( op_route_send_recv_query( OP_ROUTE_AMOD_CLASS_INFO, *p_port_handle, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, port ) == OP_ROUTE_STATUS_OK ) { param_port_handle.num_used++; return (OP_ROUTE_STATUS_OK); } p_param_entry->port_handle = 0; p_param_entry->port_guid = 0; p_param_entry->port_id = 0; break; } // End of if (!!p_param_entry->port_handle) } // End of for ( ix = param_port_handle.num_allocated; ix > 0; return (OP_ROUTE_STATUS_ERROR); } // End of op_route_open() /******************************************************************************* * * op_route_close() * * Description: * Close a routing connection by deregistering with SD. Remove the * specified OP_ROUTE_PORT_HANDLE from the port table. * * Inputs: * port_handle - OP_ROUTE_PORT_HANDLE * * Outputs: * OP_ROUTE_STATUS_OK - Close successful * OP_ROUTE_STATUS_INVALID_PARAM - Invalid OP_ROUTE_PORT_HANDLE */ enum op_route_status op_route_close(OP_ROUTE_PORT_HANDLE port_handle) { struct param_alloc_port_handle_entry * p_param_entry; if ((p_param_entry = op_route_get_port_handle_entry(port_handle))) { p_param_entry->port_handle = 0; p_param_entry->port_guid = 0; p_param_entry->port_id = 0; param_port_handle.num_used--; return (OP_ROUTE_STATUS_OK); } return (OP_ROUTE_STATUS_INVALID_PARAM); } // End of op_route_close() /******************************************************************************* * * op_route_create_job() * * Description: * Create a job on the specified port_handle, using the specified name and * port GUID vector. Upon job creation, supply a job ID, switch map and * cost matrix. 
* * Inputs: * port_handle - OP_ROUTE_PORT_HANDLE * optn_create - Options for create job (see OP_ROUTE_CREATE_JOB_xx) * p_job_params - Pointer to job parameters * p_guid_vec - Pointer to portguid_vec * port - Pointer to opamgt handle * p_job_id - Pointer to OP_ROUTE_JOB_ID * p_switch_map - Pointer to switch_map * pp_cost_matrix - Pointer to pointer to cost_matrix * * Outputs: * OP_ROUTE_STATUS_OK - Create successful * *p_switch_map->p_switch_indices = * created switch_map * **pp_cost_matrix = created cost_matrix * OP_ROUTE_STATUS_INVALID_PARAM - port_handle NULL * - p_job_params NULL * - p_guid_vec NULL * - p_job_id NULL * - p_switch_map NULL * - pp_cost_matrix NULL * TBD restore next if OP_ROUTE_MIN_NUM_GUIDS raised above 0 * ( - guid_vec too small ) * OP_ROUTE_STATUS_ERROR - Error during create */ enum op_route_status op_route_create_job( OP_ROUTE_PORT_HANDLE port_handle, uint16_t optn_create, struct op_route_job_parameters * p_job_params, struct op_route_portguid_vec * p_guid_vec, struct omgt_port * port, OP_ROUTE_JOB_ID * p_job_id, // output struct op_route_switch_map * p_switch_map, // output uint16_t ** pp_cost_matrix ) // output { enum op_route_status rstatus; if ( !port_handle || !p_job_params || !p_guid_vec || ( !p_job_id && !(optn_create & OP_ROUTE_CREATE_JOB_NO_CREATE) ) || !p_switch_map || !pp_cost_matrix /* TBD restore if #define changed || (p_guid_vec->num_guids < OP_ROUTE_MIN_NUM_GUIDS) */ ) return (OP_ROUTE_STATUS_INVALID_PARAM); // Query CreateJob record rstatus = op_route_send_recv_query( OP_ROUTE_AMOD_CREATE_JOB, port_handle, optn_create, p_job_id, NULL, p_job_params, p_guid_vec, p_switch_map, pp_cost_matrix, NULL, NULL, port ); return (rstatus); } // End of op_route_create_job() /******************************************************************************* * * op_route_complete_job() * * Description: * Complete the specified job on the specified port_handle. 
* * Inputs: * port_handle - OP_ROUTE_PORT_HANDLE * p_job_id - Pointer to OP_ROUTE_JOB_ID * port - Pointer to opamgt port handler * * Outputs: * OP_ROUTE_STATUS_OK - Completion successful * OP_ROUTE_STATUS_INVALID_PARAM - port_handle NULL * OP_ROUTE_STATUS_ERROR - Error during completion */ enum op_route_status op_route_complete_job( OP_ROUTE_PORT_HANDLE port_handle, OP_ROUTE_JOB_ID job_id, struct omgt_port * port ) { enum op_route_status rstatus; if (!port_handle) return (OP_ROUTE_STATUS_INVALID_PARAM); // Query CompleteJob record rstatus = op_route_send_recv_query( OP_ROUTE_AMOD_COMPLETE_JOB, port_handle, 0, &job_id, NULL, NULL, NULL, NULL, NULL, NULL, NULL, port ); return (rstatus); } // End of op_route_complete_job() /******************************************************************************* * * op_route_get_portguid_vec() * * Description: * Get port GUID vector for the specified job on the specified port_handle. * * Inputs: * port_handle - OP_ROUTE_PORT_HANDLE * job_id - OP_ROUTE_JOB_ID * port - Pointer to opamgt handle * p_portguid_vec - Pointer to portguid_vec * * Outputs: * OP_ROUTE_STATUS_OK - Get successful * *p_portguid_vec = portguid_vec * OP_ROUTE_STATUS_INVALID_PARAM - port_handle NULL * - p_guid_vec NULL * OP_ROUTE_STATUS_ERROR - Error during get */ enum op_route_status op_route_get_portguid_vec( OP_ROUTE_PORT_HANDLE port_handle, OP_ROUTE_JOB_ID job_id, struct omgt_port * port, struct op_route_portguid_vec * p_guid_vec ) // output { enum op_route_status rstatus; if (!port_handle || !p_guid_vec) return (OP_ROUTE_STATUS_INVALID_PARAM); // Query PortGUID vector record rstatus = op_route_send_recv_query( OP_ROUTE_AMOD_GET_PORTGUID_VEC, port_handle, 0, &job_id, NULL, NULL, p_guid_vec, NULL, NULL, NULL, NULL, port ); return (rstatus); } // End of op_route_get_portguid_vec() /******************************************************************************* * * op_route_release_portguid_vec() * * Description: * Release the specified portguid_vec. 
* * Inputs: * p_guid_vec - Pointer to portguid_vec * * Outputs: * none */ void op_route_release_portguid_vec(struct op_route_portguid_vec * p_guid_vec) { if (p_guid_vec && p_guid_vec->p_guids) { free(p_guid_vec->p_guids); p_guid_vec->p_guids = NULL; } } // End of op_route_release_portguid_vec() /******************************************************************************* * * op_route_get_switch_map() * * Description: * Get switch_map for the specified job on the specified port_handle. * * Inputs: * port_handle - OP_ROUTE_PORT_HANDLE * job_id - OP_ROUTE_JOB_ID * port - Pointer to opamgt handle * p_switch_map - Pointer to switch_map * * Outputs: * OP_ROUTE_STATUS_OK - Get successful * *p_switch_map = switch_map * OP_ROUTE_STATUS_INVALID_PARAM - port_handle NULL * - p_switch_map NULL * OP_ROUTE_STATUS_ERROR - Error during get */ enum op_route_status op_route_get_switch_map( OP_ROUTE_PORT_HANDLE port_handle, OP_ROUTE_JOB_ID job_id, struct omgt_port * port, struct op_route_switch_map * p_switch_map ) // output { enum op_route_status rstatus; if (!port_handle || !p_switch_map) return (OP_ROUTE_STATUS_INVALID_PARAM); // Query switch_map record rstatus = op_route_send_recv_query( OP_ROUTE_AMOD_GET_SWITCH_MAP, port_handle, 0, &job_id, NULL, NULL, NULL, p_switch_map, NULL, NULL, NULL, port ); return (rstatus); } // End of op_route_get_switch_map() /******************************************************************************* * * op_route_release_switch_map() * * Description: * Release the specified switch_map. 
 *
 * Inputs:
 *      p_switch_map - Pointer to switch_map
 *
 * Outputs:
 *      none
 */
void op_route_release_switch_map(struct op_route_switch_map * p_switch_map)
{
	/* Only the index array is dynamically allocated; the pointer is
	 * cleared after free() so a repeated release is a harmless no-op. */
	if (p_switch_map && p_switch_map->p_switch_indices)
	{
		free(p_switch_map->p_switch_indices);
		p_switch_map->p_switch_indices = NULL;
	}

}  // End of op_route_release_switch_map()

/*******************************************************************************
 *
 * op_route_get_cost_matrix()
 *
 * Description:
 *      Get cost_matrix for the specified job on the specified port_handle.
 *
 * Inputs:
 *      port_handle    - OP_ROUTE_PORT_HANDLE
 *      job_id         - OP_ROUTE_JOB_ID
 *      port           - Pointer to opamgt handle
 *      pp_cost_matrix - Pointer to pointer to cost_matrix
 *
 * Outputs:
 *      OP_ROUTE_STATUS_OK - Get successful
 *        **pp_cost_matrix = cost_matrix
 *      OP_ROUTE_STATUS_INVALID_PARAM - port_handle NULL
 *                                    - pp_cost_matrix NULL
 *      OP_ROUTE_STATUS_ERROR - Error during get
 */
enum op_route_status op_route_get_cost_matrix( OP_ROUTE_PORT_HANDLE port_handle,
	OP_ROUTE_JOB_ID job_id,
	struct omgt_port * port,
	uint16_t ** pp_cost_matrix )        // output
{
	enum op_route_status rstatus;

	if (!port_handle || !pp_cost_matrix)
		return (OP_ROUTE_STATUS_INVALID_PARAM);

	// Query cost_matrix record; pp_cost_matrix is the only output slot used
	// in the send_recv_query parameter list for this AMOD
	rstatus = op_route_send_recv_query( OP_ROUTE_AMOD_GET_COST_MATRIX,
		port_handle, 0, &job_id, NULL, NULL, NULL, NULL, pp_cost_matrix,
		NULL, NULL, port );

	return (rstatus);

}  // End of op_route_get_cost_matrix()

/*******************************************************************************
 *
 * op_route_release_cost_matrix()
 *
 * Description:
 *      Release the specified cost_matrix.
 *
 * Inputs:
 *      p_cost_matrix - Pointer to cost_matrix
 *
 * Outputs:
 *      none
 */
void op_route_release_cost_matrix(uint16_t * p_cost_matrix)
{
	// NOTE(review): the NULL check is redundant (free(NULL) is a no-op) but
	// is kept for symmetry with the other release functions
	if (p_cost_matrix)
		free(p_cost_matrix);

}  // End of op_route_release_cost_matrix()

/*******************************************************************************
 *
 * op_route_get_use_matrix()
 *
 * Description:
 *      Get use_matrix for the specified job on the specified port_handle.
 *
 * Inputs:
 *      port_handle  - OP_ROUTE_PORT_HANDLE
 *      job_id       - OP_ROUTE_JOB_ID
 *      port         - Pointer to opamgt handle
 *      p_use_matrix - Pointer to use_matrix
 *
 * Outputs:
 *      OP_ROUTE_STATUS_OK - Get successful
 *        *p_use_matrix = use_matrix
 *      OP_ROUTE_STATUS_INVALID_PARAM - port_handle NULL
 *                                    - p_use_matrix NULL
 *      OP_ROUTE_STATUS_ERROR - Error during get
 */
enum op_route_status op_route_get_use_matrix( OP_ROUTE_PORT_HANDLE port_handle,
	OP_ROUTE_JOB_ID job_id,
	struct omgt_port * port,
	struct op_route_use_matrix * p_use_matrix )
{
	enum op_route_status rstatus;

	if (!port_handle || !p_use_matrix)
		return (OP_ROUTE_STATUS_INVALID_PARAM);

	// Query use_matrix record; p_use_matrix is the output slot for this AMOD
	rstatus = op_route_send_recv_query( OP_ROUTE_AMOD_GET_USE_MATRIX,
		port_handle, 0, &job_id, NULL, NULL, NULL, NULL, NULL,
		p_use_matrix, NULL, port );

	return (rstatus);

}  // End of op_route_get_use_matrix()

/*******************************************************************************
 *
 * op_route_set_use_matrix()
 *
 * Description:
 *      Set use_matrix for the specified job on the specified port_handle.
 *
 * Inputs:
 *      port_handle  - OP_ROUTE_PORT_HANDLE
 *      job_id       - OP_ROUTE_JOB_ID
 *      port         - Pointer to opamgt handle
 *      p_use_matrix - Pointer to use_matrix
 *
 * Outputs:
 *      OP_ROUTE_STATUS_OK - Set successful
 *      OP_ROUTE_STATUS_INVALID_PARAM - port_handle NULL
 *                                    - p_use_matrix NULL
 *      OP_ROUTE_STATUS_ERROR - Error during set
 */
enum op_route_status op_route_set_use_matrix( OP_ROUTE_PORT_HANDLE port_handle,
	OP_ROUTE_JOB_ID job_id,
	struct omgt_port * port,
	struct op_route_use_matrix * p_use_matrix )
{
	enum op_route_status rstatus;

	if (!port_handle || !p_use_matrix)
		return (OP_ROUTE_STATUS_INVALID_PARAM);

	// Send set use_matrix record; same parameter slot as the get, the AMOD
	// selects the direction
	rstatus = op_route_send_recv_query( OP_ROUTE_AMOD_SET_USE_MATRIX,
		port_handle, 0, &job_id, NULL, NULL, NULL, NULL, NULL,
		p_use_matrix, NULL, port );

	return (rstatus);

}  // End of op_route_set_use_matrix()

/*******************************************************************************
 *
 * op_route_release_use_matrix()
 *
 * Description:
 *      Release the specified use_matrix.
 *
 * Inputs:
 *      p_use_matrix - Pointer to use_matrix
 *
 * Outputs:
 *      none
 */
void op_route_release_use_matrix(struct op_route_use_matrix * p_use_matrix)
{
	/* Only the element array is dynamically allocated; the pointer is
	 * cleared after free() so a repeated release is a harmless no-op. */
	if (p_use_matrix && p_use_matrix->p_elements)
	{
		free(p_use_matrix->p_elements);
		p_use_matrix->p_elements = NULL;
	}

}  // End of op_route_release_use_matrix()

/*******************************************************************************
 *
 * op_route_get_job_list()
 *
 * Description:
 *      Get a list of created jobs on the specified port_handle.
 *
 * Inputs:
 *      port_handle - OP_ROUTE_PORT_HANDLE
 *      port        - Pointer to opamgt handle
 *      p_job_list  - Pointer to job list (array of job_info)
 *
 * Outputs:
 *      OP_ROUTE_STATUS_OK - Get successful
 *        *p_job_list = job_list
 *      OP_ROUTE_STATUS_INVALID_PARAM - port_handle NULL
 *                                    - p_job_list NULL
 *      OP_ROUTE_STATUS_ERROR - Error during get
 */
enum op_route_status op_route_get_job_list( OP_ROUTE_PORT_HANDLE port_handle,
	struct omgt_port * port,
	struct op_route_job_list * p_job_list )     // output
{
	enum op_route_status rstatus;

	if (!port_handle || !p_job_list)
		return (OP_ROUTE_STATUS_INVALID_PARAM);

	// Query JobList record; no job_id is supplied since all jobs are listed
	rstatus = op_route_send_recv_query( OP_ROUTE_AMOD_GET_JOBS,
		port_handle, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		p_job_list, port );

	return (rstatus);

}  // End of op_route_get_job_list()

/*******************************************************************************
 *
 * op_route_release_job_list()
 *
 * Description:
 *      Release the specified job_list.
 *
 * Inputs:
 *      p_job_list - Pointer to job_list
 *
 * Outputs:
 *      none
 */
void op_route_release_job_list(struct op_route_job_list * p_job_list)
{
	/* Only the job_info array is dynamically allocated; the pointer is
	 * cleared after free() so a repeated release is a harmless no-op. */
	if (p_job_list && p_job_list->p_job_info)
	{
		free(p_job_list->p_job_info);
		p_job_list->p_job_info = NULL;
	}

}  // End of op_route_release_job_list()

/*******************************************************************************
 *
 * op_route_poll_ready()
 *
 * Description:
 *      Poll for the status of the specified job on the specified port_handle.
 *
 * Inputs:
 *      port_handle  - OP_ROUTE_PORT_HANDLE
 *      job_id       - OP_ROUTE_JOB_ID
 *      port         - Pointer to opamgt handle
 *      p_job_status - Pointer to job status
 *
 * Outputs:
 *      OP_ROUTE_STATUS_OK - Poll successful
 *        *p_job_status = job status
 *      OP_ROUTE_STATUS_INVALID_PARAM - port_handle invalid
 *                                    - p_job_status NULL
 *      OP_ROUTE_STATUS_ERROR - Poll unsuccessful
 */
enum op_route_status op_route_poll_ready( OP_ROUTE_PORT_HANDLE port_handle,
	OP_ROUTE_JOB_ID job_id,
	struct omgt_port * port,
	enum op_route_job_status * p_job_status )
{
	enum op_route_status rstatus;

	if (!port_handle || !p_job_status)
		return (OP_ROUTE_STATUS_INVALID_PARAM);

	// Query PollReady record; p_job_status is the output slot for this AMOD
	rstatus = op_route_send_recv_query( OP_ROUTE_AMOD_POLL_READY,
		port_handle, 0, &job_id, p_job_status, NULL, NULL, NULL, NULL,
		NULL, NULL, port );

	return (rstatus);

}  // End of op_route_poll_ready()


// End of file
#pragma once
#include <ISerializable.hpp>
#include <IPrivateComponent.hpp>
#include <Entity.hpp>
namespace UniEngine
{
class Scene;

// Per-entity bookkeeping record. Serializable (see Serialize/Deserialize)
// and cloneable into another Scene (see Clone).
struct EntityMetadata : public ISerializable
{
    // Human-readable entity name.
    std::string m_name;
    // Version counter, starting at 1.
    // NOTE(review): increment semantics live in the .cpp - confirm when bumped.
    unsigned m_version = 1;
    // Enabled flag; defaults to true.
    bool m_enabled = true;
    // Parent entity; a default-constructed Entity() marks "no parent".
    Entity m_parent = Entity();
    // Private components attached to this entity.
    std::vector<PrivateComponentElement> m_privateComponentElements;
    // Direct children of this entity.
    std::vector<Entity> m_children;
    // Index into the scene's data-component storages.
    // NOTE(review): exact indexing scheme defined elsewhere - verify against Scene.
    size_t m_dataComponentStorageIndex = 0;
    // Index of this entity's slot within the storage's chunk array.
    size_t m_chunkArrayIndex = 0;

    // ISerializable overrides: write/read this record as YAML.
    void Serialize(YAML::Emitter &out) override;
    void Deserialize(const YAML::Node &in) override;
    // Copy 'source' into this record in the context of the given scene.
    void Clone(const EntityMetadata &source, const std::shared_ptr<Scene> &scene);
};
} // namespace UniEngine
package client

import (
	"net"

	log "github.com/Sirupsen/logrus"

	"github.com/jamesrwhite/minicached/store"
)

// Client holds the per-connection protocol state for one connected client.
type Client struct {
	// Id identifies the client.
	Id string
	// Connection is the underlying network connection.
	Connection net.Conn
	// State is one of the STATE_* constants below.
	State uint8
	// Input is the raw input for the command being processed.
	Input string
	// Command is the command currently being processed.
	Command string
	// Record is the store record associated with the current command.
	Record *store.Record
}

// Protocol states for Client.State.
// STATE_DEFAULT is the initial state a client is (re)set to; see Reset.
const STATE_DEFAULT uint8 = 1

// STATE_EXPECTING_VALUE indicates a value payload is expected next.
const STATE_EXPECTING_VALUE uint8 = 2

// States for each supported command.
const STATE_COMMAND_GET uint8 = 3
const STATE_COMMAND_SET uint8 = 4
const STATE_COMMAND_DELETE uint8 = 5
const STATE_COMMAND_QUIT uint8 = 6
const STATE_COMMAND_FLUSH_ALL uint8 = 7

// Reset a clients state to what it would be on first connection:
// default state, no buffered input or command, and a fresh empty record.
func (client *Client) Reset() {
	// Log the pre-reset state for debugging.
	log.WithFields(log.Fields{
		"event":         "client_reset",
		"client_state":  client.State,
		"connection_id": client.Connection.RemoteAddr().String(),
	}).Debug("Client Reset")

	client.State = STATE_DEFAULT
	client.Input = ""
	client.Command = ""
	client.Record = &store.Record{}
}
def update_store_from_ical():
    """Refresh the meeting store from the configured iCal file.

    Reads up to 90 days of events from the path in the ``[Ical] path``
    config entry, creates/updates a store entry for each event, then
    removes store entries whose UID no longer appears in the file.

    Returns:
        int: the number of events read and created/updated.
    """
    icspath = gconfig.get('Ical', 'path')
    logging.debug("Update meeting store from %s", icspath)
    levents = get_events(icspath, 90)

    # Use a set for O(1) membership checks during the prune pass below
    # (the original used a list, making the prune O(n*m)).
    seen_uids = set()
    counter = 0
    for e in levents:
        data = make_data_from_event(e)
        update_store_with_data(data)
        seen_uids.add(data['uid'])
        counter += 1

    # Snapshot the stored UIDs before deleting so we never mutate the
    # store's container while iterating over it.
    for uid in list(get_events_from_store()):
        if uid not in seen_uids:
            remove_event_from_store(uid)

    logging.info("%d events read and created/updated", counter)
    return counter
package cn.yukonga.yrpc.server.annotation;

import org.springframework.context.annotation.ComponentScan;
import org.springframework.context.annotation.Import;
import org.springframework.core.annotation.AliasFor;

import java.lang.annotation.*;

/**
 * Enables the yRpc server for a Spring application: imports
 * {@link YRpcServerRegistrar} and triggers a component scan over the
 * configured base packages.
 *
 * @author : yukong
 */
@Target(ElementType.TYPE)
@Retention(RetentionPolicy.RUNTIME)
@Documented
@Import(YRpcServerRegistrar.class)
@ComponentScan
public @interface EnableYRpcServer {

    /**
     * Convenience attribute.
     * NOTE(review): unlike {@code basePackages()}, this carries no
     * {@code @AliasFor} - confirm whether it is intended as an alias.
     */
    String[] value() default {};

    /**
     * Base packages to scan; alias for {@link ComponentScan#basePackages()}.
     */
    @AliasFor(annotation = ComponentScan.class, attribute = "basePackages")
    String[] basePackages() default {};

    /**
     * Type-safe alternative to {@link #basePackages()}; alias for
     * {@link ComponentScan#basePackageClasses()}.
     */
    @AliasFor(annotation = ComponentScan.class, attribute = "basePackageClasses")
    Class<?>[] basePackageClasses() default {};
}
Surviving the World A Photocomic Education by Dante Shepherd Lesson #903 - Slippery Slopes I'd actually written and prepped most of this comic before Maryland held its debate on the gay marriage bill on Friday, but considering how it turned out (and considering I'm currently living here), it feels appropriate to get this one posted today. There were a lot of positives that could be taken out of the debate, even by those of us who supported it and even though it failed. First, when I was listening to the debate, there were very few people planning on voting against it who got up and spoke. Secondly, those who did often did not cite religious arguments - considering this is a country founded on freedom of religion, that's certainly a factor to take some comfort in. Thirdly, there were a number of people who spoke in favor of the bill who recognized they would take heat from their family and constituents for their vote - and they didn't care. Look, some day, everyone is going to have to look back on this issue and explain to the youth of the future why it took us so frickin' long. And certainly, we can blame some of it on organizations like NOM, whose published talking points provided some of the initial slippery slope arguments in the flowchart. In the meantime, though? We can have rational arguments about it from opposing sides. And slippery slope arguments? THEY DON'T HOLD UP. Yes, some of the arguments above are ridiculous, but some of them? Shouldn't there be answers to them, too? Like one delegate said during the debate the other day: "Some people say if this bill passes, I will someday be able to marry a toaster, an android, or a cat. (This argument) sickens me." Because that argument completely ignores the perfectly fine counterpoint regarding consent. And such counterpoints are certainly not worse than the pro-sanctity-of-marriage counterpoints against divorce, right? 
In the meantime, I'm going to take heart in the speech on Friday by delegate Anne Healey (paraphrased): "I am going to make many constituents, friends and family who supported me for years disappointed in me. But I have not heard one legislative argument against this bill. I believe I must vote (for this bill)." Gay marriage for everyone will happen someday soon. Let's get to debating it rationally so we can get to that day sooner. Agreed?
def _GSA(self, sentence):
    """Build the GSA field-name list and delegate parsing to self._mixhash.

    The keyword list is identical to the original hand-written one:
    NMEA, Mode, FixStatus, SatelliteUsed01..SatelliteUsed12, PDOP, HDOP, VDOP.
    """
    satellite_keys = ['SatelliteUsed%02d' % slot for slot in range(1, 13)]
    keywords = ['NMEA', 'Mode', 'FixStatus'] + satellite_keys + ['PDOP', 'HDOP', 'VDOP']
    return self._mixhash(keywords, sentence)
Melioidosis presenting with mediastinal lymphadenopathy masquerading as malignancy: a case report Introduction Melioidosis, endemic in Thailand and in the Northern Territory of Australia is an emerging infectious disease in India which can present with varied forms. A case of melioidosis, presenting as a rare anterior mediastinal mass which can masquerade as malignancy or tuberculosis, is described here. With treatment, our patient initially showed an increase in the size of mediastinal node and development of new submandibular node.. To the best of our knowledge, this phenomenon has not been documented in the literature and the same is highlighted in this case report. Case Presentation A 43-year-old Asian man with diabetes presented with fever, loss of appetite, weight loss for one month and painful swelling below his left mandible for five days. An examination revealed an enlarged left submandibular lymph node and bilateral axillary lymph nodes. A chest X-ray showed mediastinal widening. Computed tomography of his thorax showed a lobulated heterogeneously enhancing anterior mediastinal mass encasing the superior vena cava suggestive of malignancy. An excision biopsy of the lymph node showed granulomas suggestive of tuberculosis but bone marrow culture and lymph node aspirate culture grew Burkholderia pseudomallei. He was treated with parenteral ceftazidime and amoxicillin-clavulanic acid. During the course of treatment, he developed an enlargement of the submandibular lymph node on the opposite side. It gradually subsided with the continuation of therapy orally with a combination of cotrimoxazole and doxycycline for six months. A repeat computed tomography chest scan showed resolution of the mediastinal mass. Conclusion Melioidosis can present as a mediastinal mass that mimics tuberculosis or malignancy. 
During the initial phase of treatment of melioidosis, the appearance of new lymph nodes or an increase in the size of the existing lymph nodes does not mean treatment failure. Inexperienced clinicians may consider this as treatment failure and may switch treatment. To the best of our knowledge, this is the first report documenting this phenomenon in melioidosis cases. Introduction Melioidosis is called a mimicker of maladies. In its acute form it can mimic any community acquired bacterial sepsis, pneumonia or abscess, especially that produced by staphylococcus. In its chronic form, it can mimic tuberculosis or malignancy . Melioidosis can present with subcutaneous abscesses and visceral abscesses in the liver, spleen, prostate, parotid, and lymph nodal mass . Melioidosis is endemic in Northern Australia, Thailand, Singapore, Malaysia, Myanmar and Vietnam . In India, it is sporadic with an increasing trend in the southern states . Burkholderia pseudomallei, the causative organism, is a gram negative, motile bacillus isolated from soil and surface water. The disease is acquired by inoculation through abraded skin, inhalation or ingestion . The majority of cases present during the rainy season . The incubation period ranges from 24 hours to many years . In an Australian study, chronic renal disease, chronic lung disease, and age > 45 years were independent risk factors for melioidosis . It produces necrotizing inflammation, abscess or granuloma with multinucleated giant cells . Clinical disease may present acutely with fever of less than two months duration or chronically with more than two months of fever with or without other symptoms such as cough, discharging sinus, and subcutaneous swellings. Localized disease presents as skin ulcers and subcutaneous abscesses or pneumonia. Disseminated disease can present with pneumonia, abscesses in the liver, spleen, kidney, prostate, skin and subcutaneous tissue, septic arthritis and osteomyelitis with or without septicemia. 
Lymph node swelling with necrosis can occur as part of either disseminated melioidosis or its local forms . However, mediastinal lymphadenopathy as a presenting feature of melioidosis is rare. We present a case of a patient with melioidosis which involved the mediastinal lymph node and mimicked malignancy and tuberculosis. Case presentation A 43-year-old Asian man with diabetes, a clerk in an office, presented with fever of one month and painful swelling below the left submandibular region of five days duration with a history of weight loss and poor appetite. An examination revealed a febrile patient with multiple lymph node swellings. His left submandibular lymph node was 2 × 1 cm and tender. His bilateral axillary nodes were palpable, the largest being 0.5 × 0.5 cm. Vital signs and systemic examination were normal. Investigations revealed neutrophilic leukocytosis with high erythrocyte sedimentation rate and uncontrolled diabetes mellitus. His chest X-ray showed widening of the right paratracheal region probably due to lymph node enlargement ( Figure 1). He was empirically started on intravenous amoxicillin-clavulanic acid and subcutaneous insulin. A computed tomography (CT) scan of his thorax showed an anterior mediastinal mass measuring 4.2 × 3.7 × 3 cm in the right paratracheal region suggestive of a malignant mass with a few upper lobe densities ( Figure 2). An excision biopsy of the submandibular swelling showed a non-caseating granuloma composed of lymphocytes and plasma cells suggestive of tuberculosis. A tuberculin skin test was negative. His bone marrow did not show acid fast bacilli or granuloma. Anti-tuberculosis treatment was not administered. Blood culture and bone marrow culture grew B. pseudomallei sensitive to amoxicillin-clavulanic acid, ceftazidime, piperacillin, cefaperazone-sulbactum, cotrimoxazole, doxycycline, tetracycline, meropenem and imipenem. He was treated for two weeks with ceftazidime (2 gm every six hours intravenously). 
During his hospital stay, he developed new right submandibular lymphadenopathy.
Deep-seated abscesses, extensive pulmonary disease, osteomyelitis, septic arthritis and neurological melioidosis require four to eight weeks of the intensive phase.
A copy of the written consent is available for review by the Editor-in-Chief of this journal
def load_data_nmt(batch_size, max_len, num_examples=1000):
    """Download the fra-eng corpus and build vocabularies and a data iterator.

    Args:
        batch_size: mini-batch size for the returned DataLoader.
        max_len: sequences are truncated/padded to this token length.
        num_examples: maximum number of sentence pairs to use.

    Returns:
        (src_vocab, tgt_vocab, train_iter): source/target Vocab objects and a
        shuffled DataLoader over (src, src_valid_len, tgt, tgt_valid_len).
    """
    def preprocess_raw(text):
        # Normalize non-breaking spaces and insert a space before sentence
        # punctuation so ',', '!', '.' tokenize as their own tokens.
        text = text.replace('\u202f', ' ').replace('\xa0', ' ')
        out = ''
        for i, char in enumerate(text.lower()):
            # i > 0 guard: the original indexed text[i-1] at i == 0, which
            # wrongly consulted the LAST character of the corpus.
            if char in (',', '!', '.') and i > 0 and text[i - 1] != ' ':
                out += ' '
            out += char
        return out

    url = 'http://www.manythings.org/anki/fra-eng.zip'
    print("Downloading fra-eng.zip from '{0}'".format(url))
    headers = {"User-Agent": "XY"}
    response = requests.get(url, headers=headers, stream=True)
    handle = BytesIO()
    for chunk in response.iter_content(chunk_size=512):
        if chunk:
            handle.write(chunk)
    with zipfile.ZipFile(handle, 'r') as f:
        raw_text = f.read('fra.txt').decode("utf-8")
    handle.close()

    text = preprocess_raw(raw_text)

    # Split into parallel token lists; lines are "english<TAB>french".
    source, target = [], []
    for i, line in enumerate(text.split('\n')):
        if i >= num_examples:
            break
        parts = line.split('\t')
        if len(parts) == 2:
            source.append(parts[0].split(' '))
            target.append(parts[1].split(' '))

    def build_vocab(tokens):
        # Flatten the token lists and keep tokens seen at least 3 times.
        tokens = [token for line in tokens for token in line]
        return Vocab(tokens, min_freq=3, use_special_tokens=True)

    def pad(line, max_len, padding_token):
        # Truncate or right-pad a token-id list to exactly max_len.
        if len(line) > max_len:
            return line[:max_len]
        return line + [padding_token] * (max_len - len(line))

    def build_array(lines, vocab, max_len, is_source):
        lines = [vocab[line] for line in lines]
        if not is_source:
            # Target sequences get <bos>/<eos> markers.
            lines = [[vocab.bos] + line + [vocab.eos] for line in lines]
        array = torch.tensor([pad(line, max_len, vocab.pad) for line in lines])
        valid_len = (array != vocab.pad).sum(1)
        return array, valid_len

    # Build each vocabulary exactly once (the original built them twice and
    # discarded the first result).
    src_vocab, tgt_vocab = build_vocab(source), build_vocab(target)
    src_array, src_valid_len = build_array(source, src_vocab, max_len, True)
    tgt_array, tgt_valid_len = build_array(target, tgt_vocab, max_len, False)
    train_data = data.TensorDataset(src_array, src_valid_len, tgt_array, tgt_valid_len)
    train_iter = data.DataLoader(train_data, batch_size, shuffle=True)
    return src_vocab, tgt_vocab, train_iter
/** Tags or untags all messages in the conversation.  Persists the change
 *  to the database and cache.  If the conversation includes at least one
 *  unread {@link Message} whose tagged state is changing, updates the
 *  {@link Tag}'s unread count appropriately.<p>
 *
 *  Messages in the conversation are omitted from this operation if
 *  one or more of the following applies:<ul>
 *     <li>The caller lacks {@link ACL#RIGHT_WRITE} permission on
 *         the <code>Message</code>.
 *     <li>The caller has specified a {@link MailItem.TargetConstraint}
 *         that explicitly excludes the <code>Message</code>.
 *     <li>The caller has specified the maximum change number they
 *         know about, and the (modification/content) change number on
 *         the <code>Message</code> is greater.</ul>
 *  As a result of all these constraints, no messages may actually be
 *  tagged/untagged.
 *
 *  @perms {@link ACL#RIGHT_WRITE} on all the messages */
@Override
void alterTag(Tag tag, boolean add) throws ServiceException {
    if (tag == null) {
        throw ServiceException.FAILURE("missing tag argument", null);
    }
    // Untagging something that isn't tagged is a no-op.
    if (!add && !isTagged(tag)) {
        return;
    }
    // The unread "tag" has a dedicated entry point (alterUnread).
    if (tag.getId() == Flag.ID_UNREAD) {
        throw ServiceException.FAILURE("unread state must be set with alterUnread", null);
    }
    // System flags may never be toggled through this path.
    if (tag instanceof Flag && ((Flag) tag).isSystemFlag()) {
        throw MailServiceException.CANNOT_TAG(tag, this);
    }

    markItemModified(tag instanceof Flag ? Change.FLAGS : Change.TAGS);

    TargetConstraint tcon = mMailbox.getOperationTargetConstraint();
    // Set when at least one message was skipped for lack of write access;
    // only surfaced as an error if NO message could be processed at all.
    boolean excludeAccess = false;

    List<Message> msgs = getMessages();
    List<Integer> targets = new ArrayList<Integer>(msgs.size());
    for (Message msg : msgs) {
        if (msg.isTagged(tag) == add) {
            // Already in the requested state.
            continue;
        } else if (!msg.canAccess(ACL.RIGHT_WRITE)) {
            excludeAccess = true;
            continue;
        } else if (!msg.checkChangeID() || !TargetConstraint.checkItem(tcon, msg)) {
            // Excluded by the caller's change number or target constraint.
            continue;
        } else if (add && !tag.canTag(msg)) {
            throw MailServiceException.CANNOT_TAG(tag, this);
        }

        targets.add(msg.getId());
        msg.tagChanged(tag, add);

        // +1 when tagging, -1 when untagging; applied to the counters below.
        int delta = add ? 1 : -1;
        if (tag.trackUnread() && msg.isUnread()) {
            // Second argument presumably adjusts a deleted-unread counter,
            // keyed off this conversation's \Deleted flag - confirm against
            // Tag.updateUnread.
            tag.updateUnread(delta, isTagged(Flag.FlagInfo.DELETED) ? delta : 0);
        }

        // (Un)setting \Deleted also adjusts the folder's deleted count and
        // the message's deleted-unread count.
        if (tag.getId() == Flag.ID_DELETED) {
            getFolder().updateSize(0, delta, 0);
            if (msg.isUnread()) {
                msg.updateUnread(0, delta);
            }
        }
    }

    if (targets.isEmpty()) {
        // Nothing changed; report a permission error only when access checks
        // were a reason messages got skipped.
        if (excludeAccess) {
            throw ServiceException.PERM_DENIED("you do not have sufficient permissions");
        }
    } else {
        if (ZimbraLog.mailop.isDebugEnabled()) {
            String operation = add ? "Setting" : "Unsetting";
            ZimbraLog.mailop.debug("%s %s for %s. Affected ids: %s",
                operation, getMailopContext(tag), getMailopContext(this), StringUtil.join(",", targets));
        }
        recalculateCounts(msgs);
        // Persist the new tag state for all affected messages in one call.
        DbTag.alterTag(tag, targets, add);
    }
}
#include <iostream>
#include <fstream>
#include <ctime>

#include "simplestructures.hpp"

using namespace std;

// Wraps the program's argc/argv for convenient lookup.
MainParam::MainParam (int argc, char** argv) :
argc ( argc ),
argv ( argv )
{
}

// Returns true if 'what' exactly matches any command-line argument
// (including argv[0], the program name).
bool MainParam::find (const string& what) const
{
	for ( int i = 0; i < argc; ++i )
	{
		if ( what == argv[i] )
			return true;
	}
	return false;
}

// Exception carrying a plain message string.
mexception::mexception (const string& what) throw () :
what_ ( what )
{
}

mexception::~mexception () throw ()
{
}

// Returns the stored message; pointer is valid while the exception lives.
const char* mexception::what () const throw ()
{
	return what_.c_str ();
}

// Appends the message with a timestamp to ./ErrorLog.txt.
// Note: ctime() already terminates its result with a newline.
void mexception::logerr () const throw ()
{
	cout << "Logging error...\n";

	fstream f ( "./ErrorLog.txt", fstream::out | fstream::app );

	time_t rawtime = time ( NULL );
	f << "- Error: ";
	f << what_;
	f << ". Occured on ";
	f << ctime ( &rawtime );

	f.close ();
}
/**
 * Assigns event-time timestamps to the elements of this stream and generates
 * watermarks to signal event-time progress. The given {@link WatermarkStrategy}
 * supplies both the {@link TimestampAssigner} and the {@link WatermarkGenerator}.
 *
 * <p>Every element is timestamped via
 * {@link TimestampAssigner#extractTimestamp(Object, long)} and passed to
 * {@link WatermarkGenerator#onEvent(Object, long, WatermarkOutput)}.
 * Additionally, {@link WatermarkGenerator#onPeriodicEmit(WatermarkOutput)} is
 * invoked on the interval configured by
 * {@link ExecutionConfig#getAutoWatermarkInterval()}.
 *
 * <p>Common watermark generation patterns are available as static methods on
 * {@link org.apache.flink.api.common.eventtime.WatermarkStrategy}.
 *
 * @param watermarkStrategy The strategy to generate watermarks based on event timestamps.
 * @return The stream after the transformation, with assigned timestamps and watermarks.
 */
public SingleOutputStreamOperator<T> assignTimestampsAndWatermarks(
        WatermarkStrategy<T> watermarkStrategy) {

    // Closure-cleaned copy of the user-supplied strategy (see clean()).
    final WatermarkStrategy<T> strategy = clean(watermarkStrategy);

    // The new transformation inherits the parallelism of its input.
    final TimestampsAndWatermarksTransformation<T> watermarkTransform =
            new TimestampsAndWatermarksTransformation<>(
                    "Timestamps/Watermarks",
                    getTransformation().getParallelism(),
                    getTransformation(),
                    strategy);

    getExecutionEnvironment().addOperator(watermarkTransform);
    return new SingleOutputStreamOperator<>(getExecutionEnvironment(), watermarkTransform);
}
def gen_training_dataset_part(dataset_root, idx_map, freq_counter, *passes, **kwargs):
    """Build one (features, labels) partition of the training dataset.

    Args:
        dataset_root: root directory of the log datasets.
        idx_map: mapping of dataset name -> array of log indices to process.
        freq_counter: frequency counter passed through the pipeline; its
            ``count_freq`` is used for the final feature transform.
        *passes: pipeline passes forwarded to ``run_pipeline``.
        **kwargs: ``training`` (bool, default False) is forwarded to
            ``run_pipeline``.

    Returns:
        (x, labels): a 2-D feature array with one row per log, and a label
        array assigning each dataset name (sorted order) a consecutive
        integer label.
    """
    x_tmp = []
    label_parts = []
    # Sort so the label assigned to each dataset name is deterministic.
    dataset_names = sorted(iterkeys(idx_map))
    for label, dataset_name in enumerate(dataset_names):
        idx_array = idx_map[dataset_name]
        idx_size = len(idx_array)
        for i, idx in enumerate(idx_array):
            _logger.debug(
                "[Training] Processing %s log #%d (%d/%d)",
                dataset_name, idx + 1, i + 1, idx_size
            )
            x_tmp += run_pipeline(
                log_dataset_path(dataset_root, dataset_name, idx + 1),
                freq_counter,
                *passes,
                training=kwargs.get("training", False)
            )
        label_parts.append(np.repeat(label, idx_size))
    # Concatenate once instead of once per dataset (the original re-built the
    # labels array inside the loop, which is quadratic).  Seeding with an
    # empty float array preserves the float64 dtype the original produced via
    # np.concatenate([[], ...]); an empty idx_map still yields a plain list,
    # exactly as before.
    labels = np.concatenate([np.empty(0)] + label_parts) if label_parts else []
    x = log_pipeline(x_tmp, freq_counter.count_freq)
    x = np.reshape(x, (len(x), -1))
    return x, labels
Mariano Rajoy, Spain's prime minister, steered the embattled nation towards a €300bn (£237bn) bailout yesterday as he said he was ready to act "in the best interests of the Spanish people". Mr Rajoy, who is struggling to convince dubious markets that he can bring Spain's deficit back under control, has already asked for €100bn from Europe's bailout pot for its debt-laden banking system. He inched closer to seeking a full-blown rescue yesterday as he said Madrid would have to carefully examine the conditions of such a bailout. His comments came after European Central Bank president Mario Draghi disappointed markets on Thursday by failing to deliver immediate aid to turbulent debt markets, which have pushed Spain's cost of borrowing to unsustainably high levels. We’ll tell you what’s true. You can form your own view. From 15p €0.18 $0.18 $0.27 a day, more exclusives, analysis and extras. Mr Draghi said the central bank was considering buying up the debt of eurozone strugglers, but crucially he also stressed that nations themselves would have to formally request a bailout from the European Financial Stability Facility. This is likely to come with harsh financial conditions and austerity measures. Marchel Alexandrovich, an analyst at Jefferies, said: "What Draghi has basically indicated is that the problem in the bond markets has to get considerably worse before the ECB steps in to help." Mr Rajoy said he wanted to see more details on the "non-standard measures" being planned by the ECB – likely to be bond market intervention – before deciding on a course of action. Spain's benchmark borrowing costs slipped back below the 7 per cent danger mark yesterday, although they still remain worryingly high at 6.77 per cent. He said: "I will do, as I always do, what I believe to be in the best interest of the Spanish people... 
What I want to know is what these measures are, what they mean and whether they are appropriate and, in light of the circumstances, we will make a decision, but I have still not taken any decision." Sources said Spain had for the first time conceded at a meeting between Luis de Guindos, its economy minister, and German counterpart Wolfgang Schäuble it might need a full bailout worth €300bn, although Rajoy's office denied the talks. Spain already complies with stringent EU and IMF demands to reform its economy, and last month announced a €65bn package of tax hikes and spending cuts. A full-scale bailout would be a huge political embarrassment for Mr Rajoy, who claimed "victory" in June after securing a €100bn recapitalisation of its struggling banks. Spain's banks are laden with almost €200bn in sour loans from a bust property boom, and the government was forced to nationalised the fourth-biggest lender Bankia in June. Madrid fears it could be asked for further reforms of the pension system – the last campaign pledge the centre-right prime minister has not been forced to break since winning office less than a year ago. Any further cuts are likely to trigger more protests after hundreds of thousand of Spaniards took the streets to protests against austerity measures. Alongside its banking woes, Spain is in the grip of a double-dip recession, and several of its regions are likely to seek central government support to avoid going bankrupt. An ill-timed deal: History of BA's merger with Iberia October 1998 BA first takes stake in Iberia. Summer 2007 BA headed bid for Iberia fails. June 2009 Spain's recession starts. November 2009 BA and Iberia outline merger, which is nearly scuppered by BA pension deficit. January 2010 Spain exits recession but unemployment passes 20 per cent. February 2010 Iberia posts €274m 2009 loss. October 2010 Iberia posts nine-month profit. November 2010 BA's Willie Walsh and Iberia's Antonio Vazquez seal the $8bn merger. 
January 2012 Spain back in recession. April 2012 Iberia posts €98m loss for 2011. August 2012 Prime minister Mariano Rajoy hints at bailout. We’ll tell you what’s true. You can form your own view. At The Independent, no one tells us what to write. That’s why, in an era of political lies and Brexit bias, more readers are turning to an independent source. Subscribe from just 15p a day for extra exclusives, events and ebooks – all with no ads. Subscribe now
package cmd

import (
	"flag"
	"fmt"
	"log"
	"net/http"
	"os"

	"github.com/ouqiang/delay-queue/config"
	"github.com/ouqiang/delay-queue/delayqueue"
	"github.com/ouqiang/delay-queue/routers"
)

// Cmd is the application's entry-point command.
type Cmd struct{}

var (
	// version, when set via -v, prints the app version and exits.
	version bool
	// configFile is the path to the configuration file (set via -c).
	configFile string
)

const (
	// AppVersion is the application version number.
	AppVersion = "0.4"
)

// Run starts the application: it parses command-line flags, loads the
// configuration, initializes the delay queue, and serves the HTTP API.
func (cmd *Cmd) Run() {
	// Parse command-line arguments.
	cmd.parseCommandArgs()
	if version {
		fmt.Println(AppVersion)
		os.Exit(0)
	}
	// Initialize configuration.
	config.Init(configFile)
	// Initialize the queue.
	delayqueue.Init()
	// Run the web server (blocks until a fatal error).
	cmd.runWeb()
}

// parseCommandArgs parses the command-line flags.
func (cmd *Cmd) parseCommandArgs() {
	// Configuration file path.
	flag.StringVar(&configFile, "c", "", "./delay-queue -c /path/to/delay-queue.conf")
	// Version flag.
	flag.BoolVar(&version, "v", false, "./delay-queue -v")
	flag.Parse()
}

// runWeb registers the HTTP routes and serves on the configured address.
// It only returns via log.Fatalln when ListenAndServe fails.
func (cmd *Cmd) runWeb() {
	http.HandleFunc("/push", routers.Push)
	http.HandleFunc("/pop", routers.Pop)
	// Note: /finish and /delete are both routed to routers.Delete.
	http.HandleFunc("/finish", routers.Delete)
	http.HandleFunc("/delete", routers.Delete)
	http.HandleFunc("/get", routers.Get)

	log.Printf("listen %s\n", config.Setting.BindAddress)
	err := http.ListenAndServe(config.Setting.BindAddress, nil)
	log.Fatalln(err)
}
/**************************************************************************** ** ** Copyright (C) 2014 Digia Plc and/or its subsidiary(-ies). ** Contact: http://www.qt-project.org/legal ** ** This file is part of the plugins of the Qt Toolkit. ** ** $QT_BEGIN_LICENSE:LGPL21$ ** Commercial License Usage ** Licensees holding valid commercial Qt licenses may use this file in ** accordance with the commercial license agreement provided with the ** Software or, alternatively, in accordance with the terms contained in ** a written agreement between you and Digia. For licensing terms and ** conditions see http://qt.digia.com/licensing. For further information ** use the contact form at http://qt.digia.com/contact-us. ** ** GNU Lesser General Public License Usage ** Alternatively, this file may be used under the terms of the GNU Lesser ** General Public License version 2.1 or version 3 as published by the Free ** Software Foundation and appearing in the file LICENSE.LGPLv21 and ** LICENSE.LGPLv3 included in the packaging of this file. Please review the ** following information to ensure the GNU Lesser General Public License ** requirements will be met: https://www.gnu.org/licenses/lgpl.html and ** http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html. ** ** In addition, as a special exception, Digia gives you certain additional ** rights. These rights are described in the Digia Qt LGPL Exception ** version 1.1, included in the file LGPL_EXCEPTION.txt in this package. 
** ** $QT_END_LICENSE$ ** ****************************************************************************/ #include "qwinrteglcontext.h" QT_BEGIN_NAMESPACE QWinRTEGLContext::QWinRTEGLContext(const QSurfaceFormat &format, QPlatformOpenGLContext *share, EGLDisplay display, EGLSurface surface, EGLConfig config) : QEGLPlatformContext(format, share, display, &config), m_eglSurface(surface) { } EGLSurface QWinRTEGLContext::eglSurfaceForPlatformSurface(QPlatformSurface *surface) { if (surface->surface()->surfaceClass() == QSurface::Window) { // All windows use the same surface return m_eglSurface; } else { // TODO: return EGL surfaces for offscreen surfaces qWarning("This plugin does not support offscreen surfaces."); return EGL_NO_SURFACE; } } QFunctionPointer QWinRTEGLContext::getProcAddress(const QByteArray &procName) { static QHash<QByteArray, QFunctionPointer> standardFuncs; if (standardFuncs.isEmpty()) { standardFuncs.insert(QByteArrayLiteral("glBindTexture"), (QFunctionPointer)&glBindTexture); standardFuncs.insert(QByteArrayLiteral("glBlendFunc"), (QFunctionPointer)&glBlendFunc); standardFuncs.insert(QByteArrayLiteral("glClear"), (QFunctionPointer)&glClear); standardFuncs.insert(QByteArrayLiteral("glClearColor"), (QFunctionPointer)&glClearColor); standardFuncs.insert(QByteArrayLiteral("glClearStencil"), (QFunctionPointer)&glClearStencil); standardFuncs.insert(QByteArrayLiteral("glColorMask"), (QFunctionPointer)&glColorMask); standardFuncs.insert(QByteArrayLiteral("glCopyTexImage2D"), (QFunctionPointer)&glCopyTexImage2D); standardFuncs.insert(QByteArrayLiteral("glCopyTexSubImage2D"), (QFunctionPointer)&glCopyTexSubImage2D); standardFuncs.insert(QByteArrayLiteral("glCullFace"), (QFunctionPointer)&glCullFace); standardFuncs.insert(QByteArrayLiteral("glDeleteTextures"), (QFunctionPointer)&glDeleteTextures); standardFuncs.insert(QByteArrayLiteral("glDepthFunc"), (QFunctionPointer)&glDepthFunc); standardFuncs.insert(QByteArrayLiteral("glDepthMask"), 
(QFunctionPointer)&glDepthMask); standardFuncs.insert(QByteArrayLiteral("glDisable"), (QFunctionPointer)&glDisable); standardFuncs.insert(QByteArrayLiteral("glDrawArrays"), (QFunctionPointer)&glDrawArrays); standardFuncs.insert(QByteArrayLiteral("glDrawElements"), (QFunctionPointer)&glDrawElements); standardFuncs.insert(QByteArrayLiteral("glEnable"), (QFunctionPointer)&glEnable); standardFuncs.insert(QByteArrayLiteral("glFinish"), (QFunctionPointer)&glFinish); standardFuncs.insert(QByteArrayLiteral("glFlush"), (QFunctionPointer)&glFlush); standardFuncs.insert(QByteArrayLiteral("glFrontFace"), (QFunctionPointer)&glFrontFace); standardFuncs.insert(QByteArrayLiteral("glGenTextures"), (QFunctionPointer)&glGenTextures); standardFuncs.insert(QByteArrayLiteral("glGetBooleanv"), (QFunctionPointer)&glGetBooleanv); standardFuncs.insert(QByteArrayLiteral("glGetError"), (QFunctionPointer)&glGetError); standardFuncs.insert(QByteArrayLiteral("glGetFloatv"), (QFunctionPointer)&glGetFloatv); standardFuncs.insert(QByteArrayLiteral("glGetIntegerv"), (QFunctionPointer)&glGetIntegerv); standardFuncs.insert(QByteArrayLiteral("glGetString"), (QFunctionPointer)&glGetString); standardFuncs.insert(QByteArrayLiteral("glGetTexParameterfv"), (QFunctionPointer)&glGetTexParameterfv); standardFuncs.insert(QByteArrayLiteral("glGetTexParameteriv"), (QFunctionPointer)&glGetTexParameteriv); standardFuncs.insert(QByteArrayLiteral("glHint"), (QFunctionPointer)&glHint); standardFuncs.insert(QByteArrayLiteral("glIsEnabled"), (QFunctionPointer)&glIsEnabled); standardFuncs.insert(QByteArrayLiteral("glIsTexture"), (QFunctionPointer)&glIsTexture); standardFuncs.insert(QByteArrayLiteral("glLineWidth"), (QFunctionPointer)&glLineWidth); standardFuncs.insert(QByteArrayLiteral("glPixelStorei"), (QFunctionPointer)&glPixelStorei); standardFuncs.insert(QByteArrayLiteral("glPolygonOffset"), (QFunctionPointer)&glPolygonOffset); standardFuncs.insert(QByteArrayLiteral("glReadPixels"), (QFunctionPointer)&glReadPixels); 
standardFuncs.insert(QByteArrayLiteral("glScissor"), (QFunctionPointer)&glScissor); standardFuncs.insert(QByteArrayLiteral("glStencilFunc"), (QFunctionPointer)&glStencilFunc); standardFuncs.insert(QByteArrayLiteral("glStencilMask"), (QFunctionPointer)&glStencilMask); standardFuncs.insert(QByteArrayLiteral("glStencilOp"), (QFunctionPointer)&glStencilOp); standardFuncs.insert(QByteArrayLiteral("glTexImage2D"), (QFunctionPointer)&glTexImage2D); standardFuncs.insert(QByteArrayLiteral("glTexParameterf"), (QFunctionPointer)&glTexParameterf); standardFuncs.insert(QByteArrayLiteral("glTexParameterfv"), (QFunctionPointer)&glTexParameterfv); standardFuncs.insert(QByteArrayLiteral("glTexParameteri"), (QFunctionPointer)&glTexParameteri); standardFuncs.insert(QByteArrayLiteral("glTexParameteriv"), (QFunctionPointer)&glTexParameteriv); standardFuncs.insert(QByteArrayLiteral("glTexSubImage2D"), (QFunctionPointer)&glTexSubImage2D); standardFuncs.insert(QByteArrayLiteral("glViewport"), (QFunctionPointer)&glViewport); standardFuncs.insert(QByteArrayLiteral("glActiveTexture"), (QFunctionPointer)&glActiveTexture); standardFuncs.insert(QByteArrayLiteral("glAttachShader"), (QFunctionPointer)&glAttachShader); standardFuncs.insert(QByteArrayLiteral("glBindAttribLocation"), (QFunctionPointer)&glBindAttribLocation); standardFuncs.insert(QByteArrayLiteral("glBindBuffer"), (QFunctionPointer)&glBindBuffer); standardFuncs.insert(QByteArrayLiteral("glBindFramebuffer"), (QFunctionPointer)&glBindFramebuffer); standardFuncs.insert(QByteArrayLiteral("glBindRenderbuffer"), (QFunctionPointer)&glBindRenderbuffer); standardFuncs.insert(QByteArrayLiteral("glBlendColor"), (QFunctionPointer)&glBlendColor); standardFuncs.insert(QByteArrayLiteral("glBlendEquation"), (QFunctionPointer)&glBlendEquation); standardFuncs.insert(QByteArrayLiteral("glBlendEquationSeparate"), (QFunctionPointer)&glBlendEquationSeparate); standardFuncs.insert(QByteArrayLiteral("glBlendFuncSeparate"), 
(QFunctionPointer)&glBlendFuncSeparate); standardFuncs.insert(QByteArrayLiteral("glBufferData"), (QFunctionPointer)&glBufferData); standardFuncs.insert(QByteArrayLiteral("glBufferSubData"), (QFunctionPointer)&glBufferSubData); standardFuncs.insert(QByteArrayLiteral("glCheckFramebufferStatus"), (QFunctionPointer)&glCheckFramebufferStatus); standardFuncs.insert(QByteArrayLiteral("glCompileShader"), (QFunctionPointer)&glCompileShader); standardFuncs.insert(QByteArrayLiteral("glCompressedTexImage2D"), (QFunctionPointer)&glCompressedTexImage2D); standardFuncs.insert(QByteArrayLiteral("glCompressedTexSubImage2D"), (QFunctionPointer)&glCompressedTexSubImage2D); standardFuncs.insert(QByteArrayLiteral("glCreateProgram"), (QFunctionPointer)&glCreateProgram); standardFuncs.insert(QByteArrayLiteral("glCreateShader"), (QFunctionPointer)&glCreateShader); standardFuncs.insert(QByteArrayLiteral("glDeleteBuffers"), (QFunctionPointer)&glDeleteBuffers); standardFuncs.insert(QByteArrayLiteral("glDeleteFramebuffers"), (QFunctionPointer)&glDeleteFramebuffers); standardFuncs.insert(QByteArrayLiteral("glDeleteProgram"), (QFunctionPointer)&glDeleteProgram); standardFuncs.insert(QByteArrayLiteral("glDeleteRenderbuffers"), (QFunctionPointer)&glDeleteRenderbuffers); standardFuncs.insert(QByteArrayLiteral("glDeleteShader"), (QFunctionPointer)&glDeleteShader); standardFuncs.insert(QByteArrayLiteral("glDetachShader"), (QFunctionPointer)&glDetachShader); standardFuncs.insert(QByteArrayLiteral("glDisableVertexAttribArray"), (QFunctionPointer)&glDisableVertexAttribArray); standardFuncs.insert(QByteArrayLiteral("glEnableVertexAttribArray"), (QFunctionPointer)&glEnableVertexAttribArray); standardFuncs.insert(QByteArrayLiteral("glFramebufferRenderbuffer"), (QFunctionPointer)&glFramebufferRenderbuffer); standardFuncs.insert(QByteArrayLiteral("glFramebufferTexture2D"), (QFunctionPointer)&glFramebufferTexture2D); standardFuncs.insert(QByteArrayLiteral("glGenBuffers"), (QFunctionPointer)&glGenBuffers); 
standardFuncs.insert(QByteArrayLiteral("glGenerateMipmap"), (QFunctionPointer)&glGenerateMipmap); standardFuncs.insert(QByteArrayLiteral("glGenFramebuffers"), (QFunctionPointer)&glGenFramebuffers); standardFuncs.insert(QByteArrayLiteral("glGenRenderbuffers"), (QFunctionPointer)&glGenRenderbuffers); standardFuncs.insert(QByteArrayLiteral("glGetActiveAttrib"), (QFunctionPointer)&glGetActiveAttrib); standardFuncs.insert(QByteArrayLiteral("glGetActiveUniform"), (QFunctionPointer)&glGetActiveUniform); standardFuncs.insert(QByteArrayLiteral("glGetAttachedShaders"), (QFunctionPointer)&glGetAttachedShaders); standardFuncs.insert(QByteArrayLiteral("glGetAttribLocation"), (QFunctionPointer)&glGetAttribLocation); standardFuncs.insert(QByteArrayLiteral("glGetBufferParameteriv"), (QFunctionPointer)&glGetBufferParameteriv); standardFuncs.insert(QByteArrayLiteral("glGetFramebufferAttachmentParameteriv"), (QFunctionPointer)&glGetFramebufferAttachmentParameteriv); standardFuncs.insert(QByteArrayLiteral("glGetProgramiv"), (QFunctionPointer)&glGetProgramiv); standardFuncs.insert(QByteArrayLiteral("glGetProgramInfoLog"), (QFunctionPointer)&glGetProgramInfoLog); standardFuncs.insert(QByteArrayLiteral("glGetRenderbufferParameteriv"), (QFunctionPointer)&glGetRenderbufferParameteriv); standardFuncs.insert(QByteArrayLiteral("glGetShaderiv"), (QFunctionPointer)&glGetShaderiv); standardFuncs.insert(QByteArrayLiteral("glGetShaderInfoLog"), (QFunctionPointer)&glGetShaderInfoLog); standardFuncs.insert(QByteArrayLiteral("glGetShaderPrecisionFormat"), (QFunctionPointer)&glGetShaderPrecisionFormat); standardFuncs.insert(QByteArrayLiteral("glGetShaderSource"), (QFunctionPointer)&glGetShaderSource); standardFuncs.insert(QByteArrayLiteral("glGetUniformfv"), (QFunctionPointer)&glGetUniformfv); standardFuncs.insert(QByteArrayLiteral("glGetUniformiv"), (QFunctionPointer)&glGetUniformiv); standardFuncs.insert(QByteArrayLiteral("glGetUniformLocation"), (QFunctionPointer)&glGetUniformLocation); 
standardFuncs.insert(QByteArrayLiteral("glGetVertexAttribfv"), (QFunctionPointer)&glGetVertexAttribfv); standardFuncs.insert(QByteArrayLiteral("glGetVertexAttribiv"), (QFunctionPointer)&glGetVertexAttribiv); standardFuncs.insert(QByteArrayLiteral("glGetVertexAttribPointerv"), (QFunctionPointer)&glGetVertexAttribPointerv); standardFuncs.insert(QByteArrayLiteral("glIsBuffer"), (QFunctionPointer)&glIsBuffer); standardFuncs.insert(QByteArrayLiteral("glIsFramebuffer"), (QFunctionPointer)&glIsFramebuffer); standardFuncs.insert(QByteArrayLiteral("glIsProgram"), (QFunctionPointer)&glIsProgram); standardFuncs.insert(QByteArrayLiteral("glIsRenderbuffer"), (QFunctionPointer)&glIsRenderbuffer); standardFuncs.insert(QByteArrayLiteral("glIsShader"), (QFunctionPointer)&glIsShader); standardFuncs.insert(QByteArrayLiteral("glLinkProgram"), (QFunctionPointer)&glLinkProgram); standardFuncs.insert(QByteArrayLiteral("glReleaseShaderCompiler"), (QFunctionPointer)&glReleaseShaderCompiler); standardFuncs.insert(QByteArrayLiteral("glRenderbufferStorage"), (QFunctionPointer)&glRenderbufferStorage); standardFuncs.insert(QByteArrayLiteral("glSampleCoverage"), (QFunctionPointer)&glSampleCoverage); standardFuncs.insert(QByteArrayLiteral("glShaderBinary"), (QFunctionPointer)&glShaderBinary); standardFuncs.insert(QByteArrayLiteral("glShaderSource"), (QFunctionPointer)&glShaderSource); standardFuncs.insert(QByteArrayLiteral("glStencilFuncSeparate"), (QFunctionPointer)&glStencilFuncSeparate); standardFuncs.insert(QByteArrayLiteral("glStencilMaskSeparate"), (QFunctionPointer)&glStencilMaskSeparate); standardFuncs.insert(QByteArrayLiteral("glStencilOpSeparate"), (QFunctionPointer)&glStencilOpSeparate); standardFuncs.insert(QByteArrayLiteral("glUniform1f"), (QFunctionPointer)&glUniform1f); standardFuncs.insert(QByteArrayLiteral("glUniform1fv"), (QFunctionPointer)&glUniform1fv); standardFuncs.insert(QByteArrayLiteral("glUniform1i"), (QFunctionPointer)&glUniform1i); 
standardFuncs.insert(QByteArrayLiteral("glUniform1iv"), (QFunctionPointer)&glUniform1iv); standardFuncs.insert(QByteArrayLiteral("glUniform2f"), (QFunctionPointer)&glUniform2f); standardFuncs.insert(QByteArrayLiteral("glUniform2fv"), (QFunctionPointer)&glUniform2fv); standardFuncs.insert(QByteArrayLiteral("glUniform2i"), (QFunctionPointer)&glUniform2i); standardFuncs.insert(QByteArrayLiteral("glUniform2iv"), (QFunctionPointer)&glUniform2iv); standardFuncs.insert(QByteArrayLiteral("glUniform3f"), (QFunctionPointer)&glUniform3f); standardFuncs.insert(QByteArrayLiteral("glUniform3fv"), (QFunctionPointer)&glUniform3fv); standardFuncs.insert(QByteArrayLiteral("glUniform3i"), (QFunctionPointer)&glUniform3i); standardFuncs.insert(QByteArrayLiteral("glUniform3iv"), (QFunctionPointer)&glUniform3iv); standardFuncs.insert(QByteArrayLiteral("glUniform4f"), (QFunctionPointer)&glUniform4f); standardFuncs.insert(QByteArrayLiteral("glUniform4fv"), (QFunctionPointer)&glUniform4fv); standardFuncs.insert(QByteArrayLiteral("glUniform4i"), (QFunctionPointer)&glUniform4i); standardFuncs.insert(QByteArrayLiteral("glUniform4iv"), (QFunctionPointer)&glUniform4iv); standardFuncs.insert(QByteArrayLiteral("glUniformMatrix2fv"), (QFunctionPointer)&glUniformMatrix2fv); standardFuncs.insert(QByteArrayLiteral("glUniformMatrix3fv"), (QFunctionPointer)&glUniformMatrix3fv); standardFuncs.insert(QByteArrayLiteral("glUniformMatrix4fv"), (QFunctionPointer)&glUniformMatrix4fv); standardFuncs.insert(QByteArrayLiteral("glUseProgram"), (QFunctionPointer)&glUseProgram); standardFuncs.insert(QByteArrayLiteral("glValidateProgram"), (QFunctionPointer)&glValidateProgram); standardFuncs.insert(QByteArrayLiteral("glVertexAttrib1f"), (QFunctionPointer)&glVertexAttrib1f); standardFuncs.insert(QByteArrayLiteral("glVertexAttrib1fv"), (QFunctionPointer)&glVertexAttrib1fv); standardFuncs.insert(QByteArrayLiteral("glVertexAttrib2f"), (QFunctionPointer)&glVertexAttrib2f); 
standardFuncs.insert(QByteArrayLiteral("glVertexAttrib2fv"), (QFunctionPointer)&glVertexAttrib2fv); standardFuncs.insert(QByteArrayLiteral("glVertexAttrib3f"), (QFunctionPointer)&glVertexAttrib3f); standardFuncs.insert(QByteArrayLiteral("glVertexAttrib3fv"), (QFunctionPointer)&glVertexAttrib3fv); standardFuncs.insert(QByteArrayLiteral("glVertexAttrib4f"), (QFunctionPointer)&glVertexAttrib4f); standardFuncs.insert(QByteArrayLiteral("glVertexAttrib4fv"), (QFunctionPointer)&glVertexAttrib4fv); standardFuncs.insert(QByteArrayLiteral("glVertexAttribPointer"), (QFunctionPointer)&glVertexAttribPointer); standardFuncs.insert(QByteArrayLiteral("glClearDepthf"), (QFunctionPointer)&glClearDepthf); standardFuncs.insert(QByteArrayLiteral("glDepthRangef"), (QFunctionPointer)&glDepthRangef); }; QHash<QByteArray, QFunctionPointer>::const_iterator i = standardFuncs.find(procName); if (i != standardFuncs.end()) return i.value(); return QEGLPlatformContext::getProcAddress(procName); } QT_END_NAMESPACE
def caps_fix(word):
    """Return `word` with its case toggled when Caps Lock was clearly on.

    The case of every letter is toggled when either:
      * the whole word is uppercase (e.g. "HTTP" -> "http"), or
      * only the first letter is lowercase and the rest uppercase
        (e.g. "hELLO" -> "Hello"); a single lowercase letter counts
        ("z" -> "Z").
    Otherwise the word is returned unchanged.
    """
    if len(word) == 1 and word.islower():
        return word.upper()
    if word.isupper():
        return word.lower()
    if word[0].islower() and word[1:].isupper():
        return word[0].upper() + word[1:].lower()
    return word


def main():
    # Read a single word from stdin and print the corrected form.
    print(caps_fix(input()))


if __name__ == "__main__":
    main()
<gh_stars>10-100 import * as chai from 'chai'; import * as slimdom from 'slimdom'; import jsonMlMapper from 'test-helpers/jsonMlMapper'; import { evaluateXPathToMap } from 'fontoxpath'; let documentNode; beforeEach(() => { documentNode = new slimdom.Document(); }); describe('map constructor', () => { beforeEach(() => { jsonMlMapper.parse( ['someElement', { someAttribute: 'someValue' }, 'A piece of text'], documentNode ); }); it('can be parsed', () => chai.assert.isOk( evaluateXPathToMap('map {"a": 1, "b":2}', documentNode), 'It should be able to be parsed' )); it('creates a map which can be returned by evaluateXPathToMap', () => chai.assert.deepEqual(evaluateXPathToMap('map {"a": 1, "b":2}', documentNode), { a: 1, b: 2, })); it('can use attribute nodes as keys', () => chai.assert.deepEqual( evaluateXPathToMap('map {@someAttribute: 1}', documentNode.documentElement), { someValue: 1 } )); it('can use nodes as keys', () => chai.assert.deepEqual(evaluateXPathToMap('map {*: 1}', documentNode), { 'A piece of text': 1, })); it('is parsed using longest substring (map{x:a:b} is map{{(x:a):b})', () => { const element = documentNode.createElement('someElement'); element .appendChild(documentNode.createElementNS('xxx', 'x:a')) .appendChild(documentNode.createTextNode('a')); element .appendChild(documentNode.createElementNS('aaa', 'a:b')) .appendChild(documentNode.createTextNode('a:b')); const expectedElement = element.appendChild(documentNode.createElementNS('', 'b')); expectedElement.appendChild(documentNode.createTextNode('b')); const namespacesByPrefix = { a: 'aaa', x: 'xxx', '': null, }; chai.assert.deepEqual( evaluateXPathToMap('map {x:a:b}', element, null, null, { namespaceResolver: (prefix) => namespacesByPrefix[prefix], }), { a: expectedElement, } ); }); it('throws an error when the key is not a singleton sequence', () => chai.assert.throws( () => evaluateXPathToMap('map {(1, 2): 3}', documentNode.documentElement), 'XPTY0004' )); });
def load_netcdf(files_list):
    """Load and merge variables from a sequence of netCDF files.

    Variables seen for the first time are stored as-is; in subsequent
    files they are concatenated along axis 0, except 'latitude' and
    'longitude', which are kept from the first file only (presumably
    constant across files -- TODO confirm).  The 'time' variable is
    converted via _convert_time() before returning.

    Parameters
    ----------
    files_list : iterable of str
        Paths of the netCDF files to read, in concatenation order.

    Returns
    -------
    dict
        Mapping of variable name to array data.
    """
    data = {}
    for path in files_list:
        nc = netcdf.NetCDFFile(path, 'r', maskandscale=True)
        try:
            for key, var in nc.variables.items():
                # Copy so the arrays remain valid after the file is closed
                # (scipy's netcdf reader memory-maps the file contents).
                values = var[:].copy()
                if key not in data:
                    data[key] = values
                elif key not in ('latitude', 'longitude'):
                    data[key] = np.concatenate((data[key], values), axis=0)
        finally:
            # The original version leaked every file handle; close it here.
            nc.close()
    data['time'] = _convert_time(data['time'].astype(np.float64))
    return data
def calc_ar_props(
    trace,
    ar_p,
    sn=None,
    g=None,
    noise_range=(0.25, 0.5),
    noise_method="mean",
    lags=5,
    fudge_factor=0.96,
):
    """Estimate AR-process parameters and the stationary standard deviation.

    Delegates parameter estimation to ``estimate_parameters`` and then
    computes the standard deviation of the stationary AR(1) or AR(2)
    process implied by the estimated coefficients ``g`` and noise level
    ``sn``.

    Returns a tuple ``(g, sn, std_ar)``.  Raises ValueError if ``ar_p``
    is not 1 or 2.
    """
    g, sn = estimate_parameters(
        trace,
        p=ar_p,
        sn=sn,
        g=g,
        range_ff=noise_range,
        method=noise_method,
        lags=lags,
        fudge_factor=fudge_factor,
    )

    if ar_p == 1:
        # Stationary variance of AR(1): sn^2 / (1 - g1^2).
        rho = float(g[0])
        std_ar = np.sqrt(sn ** 2 / (1 - rho ** 2))
    elif ar_p == 2:
        # Stationary variance of AR(2):
        # (1 - g2) * sn^2 / ((1 + g2)(1 - g1 - g2)(1 + g1 - g2)).
        g1, g2 = g[0], g[1]
        denom = (1 + g2) * (1 - g1 - g2) * (1 + g1 - g2)
        std_ar = np.sqrt((1 - g2) * sn ** 2 / denom)
    else:
        raise ValueError("AR process parameter p must be 1 or 2.")

    return g, sn, std_ar
def best_worst_times(passwords, correct, k):
    """Compute best- and worst-case times to enter the correct password.

    Passwords are tried in non-decreasing length order; among equal-length
    candidates the correct one (``correct``, assumed present in
    ``passwords``) is tried first in the best case and last in the worst
    case.  Each attempt takes 1 second and every ``k`` consecutive
    failures add a 5-second lockout.

    Returns an (best, worst) tuple of integers (seconds).
    """
    target_len = len(correct)
    # Shorter passwords are always tried before the correct one.
    shorter = sum(1 for p in passwords if len(p) < target_len)
    same_len = sum(1 for p in passwords if len(p) == target_len)

    best = (shorter // k) * 5 + shorter + 1
    worst = ((shorter + same_len - 1) // k) * 5 + (shorter + same_len)
    return best, worst


def main():
    # First line: n and k; next n lines: passwords; last line: the correct one.
    n, k = map(int, input().split(' '))
    passwords = [input() for _ in range(n)]
    vanya = input()
    print(*best_worst_times(passwords, vanya, k))


if __name__ == "__main__":
    main()
C99 selectively accumulates in vulnerable neurons in Alzheimer’s disease Introduction The levels and distribution of amyloid deposits in the brain do not correlate well with Alzheimer’s disease (AD) progression. Therefore, it is likely that Amyloid-precursor-protein proteolytic fragments other than beta-amyloid contribute to the onset of AD. Methods We developed a sensitive assay adapted to the detection of C99, the direct precursor of beta-amyloid. Three postmortem groups were studied: control with normal and stable cognition; subjects with moderate AD, and individuals with severe AD. The amount of C99 and beta-amyloid was quantified and correlated with the severity of AD. Results C99 accumulates in vulnerable neurons, and its levels correlate with the degree of cognitive impairment in patients suffering from AD. In contrast, beta-amyloid levels are increased in both vulnerable and resistant brain areas. Discussion These results raise the possibility that C99, rather than beta-amyloid plaques, is responsible for the death of nerve cells in Alzheimer’s disease. Introduction Alzheimer's disease (AD) is the most common type of dementia, characterized by accumulation of extracellular and intracellular beta-amyloid aggregates, intracellular paired helical fragments and extensive age-dependent neuronal loss in vulnerable areas . Robust genetic and biochemical evidence points to alterations of Amyloid-precursor protein (APP) metabolism as causative in the development of Alzheimer's disease. In the amyloidogenic pathway, APP is cleaved by beta-secretase (BACE) to generate sAPPb and C99. Gamma-secretase then cleaves C99 to generate beta-amyloid and AICD. Accumulation of beta-amyloid is a cardinal feature of Alzheimer's disease, but its role in causing injury in AD remains unclear. Thus, the distribution of beta-amyloid deposits in AD brains has a poor correlation with dementia severity, loss of neural function and cognitive impairment . 
Therefore, it is possible that APP metabolites other than beta-amyloid make a significant contribution to AD pathophysiology. In addressing this issue, a challenge has been the inability to determine the distribution and levels of specific APP metabolites in intact cells and tissues. We have approached this problem by adapting the proximity ligation assay (PLA) for the detection of C99. In this assay, two primary antibodies recognize the target epitopes on the protein of interest. If the epitopes are in close proximity, secondary antibodies covalently bound to complementary DNA strands participate in rolling circle DNA synthesis. The DNA synthesis reaction results in a one thousand-fold amplification of the DNA circle. Fluorescently-labeled complementary oligonucleotide probes added to the reaction bind to the amplified DNA. The resulting fluorescent signal can be detected by fluorescence microscopy. This technique offers great sensitivity, since powerful rolling circle amplification is used. It also offers great selectivity, since dual target recognition is used . Here, we used antibodies generated against both the N and C terminal domains of C99 in cultured cells and found that we can distinguish C99 from beta-amyloid, full-length APP, C83, and AICD. The adaptation of the proximity ligation assay to the detection of C99 provides a general framework for the detection in situ of low-abundance proteolytic fragments in human brain and animal models. Antibodies Primary antibodies used were anti-NeuN, Millipore; anti-GFAP, Abcam; anti-N-terminal C99, 6C3, Abcam, anti-Tau AF3494, R&D, anti-Abeta 6E10, Covance, and RU-369 produced at the Rockefeller University. Secondary antibody was AlexaFluor 488 from Molecular Probes. 
Next day, the cells were washed with PBS, fixed in 4% paraformaldehyde in PBS for 10 min at room temperature, permeabilized in PBST (PBS with 0.2 % Triton X-100) for 10 min, blocked in 10% goat serum in PBST and incubated in primary antibody overnight in blocking solution. Primary antibodies used to recognize C99 were: RU-369 (Immune serum raised in rabbit, 1:50 dilution) and 6C3 (Abcam, catalog number 126649 raised in mouse, 1:50 dilution). Next, the Duolink kit was used to develop the PLA signal (Sigma, catalog number DUO92008). Briefly, cells were then washed twice for 10 min each time in wash buffer A, incubated with PLA probe solution for 1 h at 37 °C, washed again in wash buffer A, and incubated in ligation-ligase solution for 30 min at 37 °C. Coverslips were washed in wash buffer A and incubated in amplification-polymerase solution for 100 min at 37 °C, washed twice for 10 min each time in wash buffer B. Next, the coverslips were mounted in Duolink mounting media with DAPI. The specificity of the signal was controlled by omitting one of the antibodies. Cells were imaged using a Zeiss LSM710 laser confocal microscope. Quantification of PLA signal was performed using Image J software (NIH). Proximity ligation assay (PLA) in cells N2A cells stably overexpressing APP-695 were seeded on sterile, Zeiss high-performance cover glass at a density of 175,000 cells/well and grown overnight. Cells were treated with DMSO or a gamma secretase inhibitor (1 µM) in media for 3 hours. Cells were fixed with 4% PFA in PBS for 10 min at room temperature, permeabilized with PBST (PBS with 0.2 % Triton X-100) for 10 min at room temperature, and blocked in Duolink® PLA blocking solution for 1 hour at 37 °C. The cells were then incubated with primary antibodies RU-369 and 6C3, both at a 1:50 dilution in the Duolink® antibody diluent, overnight at 4°C. 
PLA was conducted the next day using the Duolink® PLA kit according to manufacturer's instructions until the mounting step in which the cells were separately stained for DAPI and then mounted onto a glass slide using VECTASHIELD® Antifade Mounting Medium without DAPI (Vector, catalog number H-1000). Coverslips were sealed with metallic nail polish and stored in the dark at 4°C until imaging. Super resolution three-dimensional images were taken using a DeltaVision OMX™ microscope in Structured Illumination mode. 568 nm and 405 nm lasers were used to image DAPI and PLA. Camera temperatures were kept at -80 °C, chamber temperature was kept at 23 °C, and an immersion oil with a refractive index of 1.515 was used. Z-stacks were taken to include all visible PLA dots and nuclei on the coverslip. The image data acquired by DeltaVision OMX was then reconstructed using softWoRx Imaging Workstation. Linespacing, best fit for K0, and amplitude values were evaluated to ensure the quality of the reconstruction. The reconstruction was then subjected to further processing by the "OMX Align Image" function. The final image was then compared to a widefield version of the same image using the "Generate Widefield" and "Deconvolve" functions on the raw non-reconstructed data. Individual sections from the total z-stack were used for comparison. Proximity ligation assay in human brain sections. Forty nine human brain slides from hippocampus, frontal cortex and occipital cortex were obtained from the brain bank at Mount Sinai/JJ Peters VA Medical Center NIH Brain and Tissue Repository. Donors were selected to evidence no neuropathology, or only the neuropathology of AD excluding any other discernable neuropathology, such a cerebrovascular disease or Lewy bodies. Before proceeding with the staining protocol, the slides were deparaffinized and rehydrated. Slides were placed in a rack, and the following washes were performed: 1. Xylene: 2 x 3 minutes. 2. 
Xylene 1:1 with 100% ethanol: 3 minutes 3. 100% ethanol: 2 x 3 minutes 4. 95% ethanol: 3 minutes 5. 70 % ethanol: 3 minutes 6. 50 % ethanol: 3 minutes. At the end of the washes, the brain slides were rinsed with water. Antigen retrieval was performed by microwaving the slides for 20 min in citric buffer, pH 6.0. Next, the slides were permeabilized in PBST (PBS with 0.2 % Triton X-100) for 10 min, blocked in 10% goat serum in PBST and incubated in primary antibody overnight in blocking solution. Primary antibodies used to recognize C99 were: RU-369 (Immune serum raised in rabbit, 1:50 dilution) and 6C3 (Abcam, catalog number 126649 raised in mouse, 1:50 dilution). Next, the Duolink kit was used to develop the PLA signal (Sigma, catalog number DUO92008). Cells were then washed twice for 10 min each time in wash buffer A, incubated with PLA probe solution for 1 h at 37 °C, washed again in wash buffer A, and incubated in ligation-ligase solution for 30 min at 37 °C. Coverslips were washed in wash buffer A and incubated in amplification-polymerase solution for 100 min at 37 °C, washed twice for 10 min each time in wash buffer B. Next, the slides were incubated with 0.001% DAPI in 0.01% wash buffer B for 5 min. Then, to quench autofluorescence, the slides were treated with "Trueblack" (Biotum, catalog number 23007) in 70% ethanol for 1 min. and subsequently mounted using Fluoromount-G (Southern Biotech). Slides were imaged using a Zeiss LSM710 laser confocal microscope. For coimmunofluorescence of C99-PLA with neuron and astrocyte markers, as well as with Tau protein, respective antibodies were used together with C99 primary antibodies and secondary antibodies together with PLA probes. PLA quantification. C99-PLA produces bright red dots that were quantified using Image J software (NIH). The tiff images were thresholded, and the function "Analyze particles" was applied in the red channel. 
In the blue channel, corresponding to DAPI staining, the function "Cell counter" was applied after threshold. OMX 3D-SIM using DeltaVision OMX N2A695 cells were seeded on sterile, Zeiss high-performance cover glass at a density of 175,000 cells/well and grown overnight. Cells were treated with DMSO or a gamma secretase inhibitor (1 µM) in media for 3 hours. Cells were fixed with 4% PFA in PBS and then permeabilized with 0.2% PBS-Triton. Cells were blocked for one hour in Duolink® PLA blocking solution at 37°C. The cells were then incubated with primary antibodies APP369 and C99, both at a 1:50 dilution in the Duolink® antibody diluent, overnight at 4°C. PLA was conducted the next day using the Duolink® PLA kit according to the manufacturer's instructions until the mounting step in which the cells were separately stained for DAPI and then mounted onto a glass slide using VECTASHIELD® Antifade Mounting Medium without DAPI. Coverslips were sealed with metallic nail polish and stored in the dark at 4°C until imaging. Super resolution threedimensional images were taken using a DeltaVision OMX™ microscope in Structured Illumination mode. 568 nm and 405 nm lasers were used to image DAPI and PLA. Camera temperatures were kept at -80°, chamber temperature was kept at 23°C, and an immersion oil with a refractive index of 1.515 was used. Zstacks were taken to include all visible PLA dots and nuclei on the coverslip. The image data acquired by DeltaVision OMX was then reconstructed using softWoRx Imaging Workstation. Linespacing, best fit for K0, and amplitude values were evaluated to ensure the quality of the reconstruction. The reconstruction was then subjected to further processing by the OMX Align Image function. The final image was then compared to a widefield version of the same image using the generate widefield and deconvolve functions on the raw non-reconstructed data. Individual sections from the total z-stack were used for comparisons. 
Amyloid plaque staining Sections from paraffin-embedded blocks were stained using hematoxylin-eosin and anti-β amyloid 4G8 and anti-tau AD2. All neuropathological data regarding the extent and distribution of neuropathological lesions were collected by neuropathologists unaware of any of the clinical and psychometric data. For quantitative measures of plaque density, 5 representative high-power fields (0.5 mm 2 ) were examined and an average density score was calculated for each region and expressed as mean plaque density per square millimeter. When plaques were unevenly distributed in each slide, plaques in the region with the highest density were counted. These sections were adjacent to the sections used to quantify C99 levels. Statistical analysis The data are presented as means ± SD. The significance level was determined using a two-way ANOVA with Bonferroni post-test, comparing severe versus non-demented control, moderate versus non-demented control and severe versus moderate. Multiple regression analysis in figure 4B was performed using SPSS statistical package from IBM. Other statistical analyses were performed by using GraphPad Prism software. Results First, we screened several antibodies aimed at recognizing the N and C-terminal domains of C99. The combination of 6C3, a mouse monoclonal antibody which recognizes the cleaved N-terminal of C99 and RU-369, a rabbit polyclonal antibody designed to recognize the C-terminal of APP, successfully recognized C99 ( Figure 1A). As expected with a C99-specific marker, the signal was greatly reduced by a BACE inhibitor, which blocks C99 synthesis, and dramatically increased by a gamma-secretase inhibitor, which blocks C99 breakdown (Figure 1B). To further exclude the possibility that the 6C3 antibody recognizes full length APP, we performed double immunofluorescence with 6C3 and RU-369 in N2A-695 cells treated with DMSO, BACE inhibitor or gamma-secretase inhibitor. 
The signal from 6C3 was reduced upon treatment with a BACE inhibitor and increased upon treatment with gamma-secretase inhibitor, suggesting that 6C3 detects C99 and not APP full length. In addition, Western blot analysis indicated that 6C3 recognized only C99 and not APP full length (Supplementary Fig. 1). C99-PLA produces bright red dots that can easily be quantified. Super-resolution microscopy revealed that each dot, measuring about 200-400 nm, consisted of a group of 2-3 smaller dots, consistent with the idea of aggregation of C99 molecules (Figure 1C). Next, we applied this PLA methodology to study the localization of C99 in human brain tissue. Brain tissue sections were obtained from hippocampus, frontal cortex and occipital cortex from 12 controls and 37 patients suffering from sporadic Alzheimer's disease. Age at the time of death, sex, Braak stage, Thal stage and CERAD score of these subjects are listed in Supplementary Table 1. PLA specificity was verified by omitting one of the antibodies (Supplementary figure 2). As seen in Figure 2A, C99 is readily detected in the soma and processes of neurons of cognitively normal patients. To investigate whether C99 levels correlate with cognitive decline, three postmortem groups were studied: subjects with normal and stable cognition, as measured by the clinical dementia rating (CDR = 0), subjects with moderate AD (CDR = 0.5-2), and subjects with severe AD (CDR = 3-5). Average C99 levels were increased in the CA1 area of the hippocampus when comparing severe AD to non-demented controls (39.8 +/-11.8 vs 12.5 +/-4.1 (PLA dots/cell), p<0.001), moderate AD to controls (28.3 +/-6.4 vs 12.5 +/-4.1, p<0.001) or severe AD to moderate AD (39.8 +/-11.8 vs 28.3 +/-6.4, p<0.05) (Figure 2A, E). 
In the dentate gyrus area, C99 levels were increased when comparing severe AD to non-demented controls (10.8 +/-2.8 vs 2.5 +/-1.1, p<0.001), moderate AD to non-demented controls (6.5 +/-2.4 vs 2.5 +/-1.1, p<0.05) and severe AD to moderate AD (10.8 +/-2.8 vs 6.5 +/-2.4) (Figure 2B, E). In the middle frontal gyrus of the frontal cortex, C99 levels were increased when comparing severe AD to controls (7.1 +/-2.0 vs 2.3 +/-1.6, p<0.001) and severe AD to moderate AD (7.1 +/-2.0 vs 2.60 +/-1.6, p<0.001) (Figure 2C, E). Levels of C99 in occipital cortex, an area considered to be resistant to neurodegeneration, were similar in all three groups (Figure 2D, E). Four additional samples from the hippocampus of non-demented patients and patients with severe AD were subjected to Western blot analysis using the antibody 6E10. A 4.5-fold increase in C99 levels was observed when comparing severe AD to non-demented controls, comparable to the increase seen by PLA (Supplementary figure 3). Co-staining with the neuronal nuclear marker NeuN showed that C99 accumulated in the perinuclear region of neurons in non-demented subjects. Its distribution changed to an intranuclear pattern in Alzheimer's disease subjects in dentate gyrus and frontal cortex (Figure 3A and 3C). Interestingly, C99 also accumulated in astrocytes in AD subjects, where it was localized in both the soma and processes, as revealed by GFAP staining. However, in contrast to neurons, the distribution of C99 in astrocytes did not change between non-demented and demented subjects (Figure 3B and 3C). Next, we compared the levels of beta-amyloid plaques and C99 in middle frontal cortex and occipital cortex. As seen in Figure 4A and 4B, beta-amyloid plaques were present in both vulnerable and non-vulnerable areas, while C99 accumulation correlated with disease progression only in the middle frontal cortex, a vulnerable area, but not in the occipital cortex, a resistant area. 
To study the association of C99 with selective neuronal vulnerability, costaining with Tau was performed in CA1 area of 7 non-demented controls and 15 AD patients. As seen in Figure 4C and 4D, C99 levels were greatly increased in neurons with high levels of Tau, which is a widely accepted marker of neuronal vulnerability. These results show that C99 accumulation, as opposed to betaamyloid plaques, is specific to neurons most vulnerable to neurodegeneration in Alzheimer's disease. Discussion Alzheimer's disease is characterized by accumulation of amyloid plaques and paired helical filaments. Mutations that cause early onset AD are located in the genes that code for the amyloid precursor protein, APP, and the enzyme responsible for amyloid production, Presenilin 1/Presenilin 2. These observations have led to the widely-held belief that beta-amyloid has a causative role in the development of AD. However, the distribution of amyloid deposits in the brain does not correlate well with disease progression. These results suggest the possibility that APP metabolites other than beta-amyloid might contribute to Alzheimer's disease. It has been proposed that C99, a specific APP metabolite, may cause neurodegeneration by inducing endosome enlargement , leading to endosomal dysfunction and neuronal death . In this study, we have developed a method to detect C99 in situ, and applied this method to measure the levels of C99 in postmortem human brain samples of individuals at different stages of Alzheimer's disease dementia. We found a correlation between levels of C99 and levels of cognitive decline in areas vulnerable to neurodegeneration, such as the hippocampus and frontal cortex. In the hippocampus, we found increased levels of C99 in the CA1 area and dentate gyrus in both moderate and severe AD, whereas in frontal cortex we found increased levels of C99 only in severe cases of AD, consistent with the well-established delayed involvement of frontal cortex in AD pathology. 
This correlation was absent in areas resistant to neurodegeneration, such as occipital cortex. In contrast to the area-specific increase in C99 levels, all areas studied in AD patients presented abundant amyloid plaques. Further work is required to understand the underlying mechanism by which C99 accumulates in vulnerable areas of the brain. C99 may accumulate as a consequence of increased synthesis or decreased degradation. Several studies show elevated BACE activity in sporadic AD, which could account for elevation of C99 levels due to increased synthesis. On the other hand, we and others have shown that C99 is degraded through autophagy and the endosomal-lysosomal pathway, failure of which could account for a reduced degradation. Elucidation of the detailed molecular pathway that leads to the synthesis and breakdown of C99 is likely to reveal targets responsible for the regulation of this metabolite. Such information may greatly accelerate the development of therapeutic agents for the treatment of AD. One limitation of the method described here is that it recognizes only one type of APP β-C-terminal fragment produced by BACE cleavage. BACE1 cleaves APP between Met671 and Asp672 to generate the carboxyl-terminal fragment C99. BACE1 also cleaves APP between Thr681 and Gln682, yielding C89. Our method is specific for C99, and is not able to detect C89. Regarding beta-amyloid levels, the method used detects beta-amyloid plaques, and it is possible that selected beta-amyloid species may correlate with neuronal vulnerability. Another limitation of the present study is the relatively small sample size, which allowed us to draw correlations between C99 and amyloid levels with cognitive decline, but prevented us from evaluating the correlation of C99 levels with Braak staging. Using a new method to detect specific APP metabolites, we show that C99 accumulates selectively in vulnerable neurons, but not in resistant neurons, in the brains of Alzheimer's disease patients. 
Approaches to reduce amyloid burden, such as gamma-secretase inhibitors or immunotherapy, despite being successful at reducing beta-amyloid levels, have failed to stop or reduce the cognitive impairment that characterizes Alzheimer's disease. Semagacestat, a gamma-secretase inhibitor, worsened the symptoms of dementia in a large phase III clinical trial. It was speculated that this action might have been due to inhibiting the cleavage by gamma-secretase of other substrates such as Notch. We suggest here that the clinical failure may have been due, at least in part, to an accumulation of C99. Dysfunction of the endo-lysosomal and autophagic pathways is one of the earliest key events in AD, progressing to widespread failure of intraneuronal waste clearance, leading to neuritic dystrophy and finally neuronal cell death. It has been shown that elevated levels of C99 in fibroblasts from Down syndrome and AD induce endosomal dysfunction by increasing the recruitment of APPL1 to rab5 endosomes, where it stabilizes active GTP-rab5, leading to accelerated endocytosis, endosome swelling and impaired axonal transport of rab5 endosomes. It will be interesting to see the specific sites of C99 accumulation in human brain (for instance, using PLA and various vesicle markers) and the effect of such accumulation on the endo-lysosomal and autophagic pathway in human neurons in culture. It has also been shown that C99 accumulation in the mouse brain leads to an increase in Cathepsin B and Lamp1 positive vesicles, as well as increased levels of LC3II and autophagic dysfunction. Although the exact molecular mechanism by which C99 exerts these effects is not yet known, it is becoming clear that C99 accumulation contributes to neuronal dysfunction and ultimately neuronal death. Our results show a correlation between C99 levels and cognitive impairment. It has been previously found that synapse loss is the major correlate of cognitive impairment. 
To analyze the relationship of C99 with synaptic loss, it could be possible to measure synaptic markers like SNAP25 and synaptophysin , in both vulnerable and resistant brain areas and correlate them with C99 levels. The method that we developed to detect specific APP fragments in fixed brain slides can be modified to quantify APP metabolites in human fluids. To that end, it could be possible to quantify C99 levels in plasma and CSF and correlate them with cognitive deterioration and vulnerability to neurodegeneration, with the goal of developing C99 as an AD biomarker. In conclusion, our results raise the possibility that C99, rather than beta-amyloid, is responsible for the death of nerve cells in Alzheimer's disease. In that case, beta-secretase inhibitors, which lower both C99 and beta-amyloid, are preferable to gamma-secretase inhibitors, which lower beta-amyloid but increase C99 levels. with Bonferroni post-test comparing against non-demented subjects by brain area or cell type. Table S1
// Approach 1: Create Methods that Search for Persons that Match One Characteristic public static void printPersonsOlderThan(List<Person> roster, int age) { for (Person p : roster) { if (p.getAge() >= age) { p.printPerson(); } } }
<gh_stars>1-10 package peji import ( "bytes" "context" "strings" "sync" "time" "github.com/influx6/npkg/njson" "github.com/influx6/sabuhp" "github.com/influx6/sabuhp/mixer" "github.com/influx6/npkg/nerror" "github.com/influx6/npkg/nxid" "github.com/influx6/groundlayer/pkg/domu" "github.com/influx6/groundlayer/pkg/styled" ) const ( DefaultMaxPageIdleness = 5 * time.Minute DefaultPageIdlenessChecksInterval = 2 * time.Minute HeaderSessionIdName = "X-Void-Id" QueryAndCookieSessionIdName = "_groundlayer_id" ) type PageCreator func(name string, theme *styled.Theme, pubsub sabuhp.Transport) *Page type PageSession struct { lastUsed time.Time Id nxid.ID Page *Page } func (ps *PageSession) Close() { ps.lastUsed = time.Time{} ps.Page.Close() } type Logger interface { Log(json *njson.JSON) } type PageSessionManager struct { routePath string maxIdle time.Duration idleCheckInterval time.Duration onAddRoute OnPage theme *styled.Theme Creator PageCreator sessions map[string]PageSession doFunc chan func() canceler context.CancelFunc ctx context.Context waiter sync.WaitGroup starter sync.Once ender sync.Once rl sync.Mutex routes map[string]bool } func NewPageSessionManager( ctx context.Context, routePath string, maxIdle time.Duration, idleCheckInterval time.Duration, creator PageCreator, theme *styled.Theme, onAddRoute OnPage, ) *PageSessionManager { var newCtx, canceler = context.WithCancel(ctx) return &PageSessionManager{ routePath: routePath, ctx: newCtx, canceler: canceler, maxIdle: maxIdle, Creator: creator, theme: theme, onAddRoute: onAddRoute, idleCheckInterval: idleCheckInterval, doFunc: make(chan func(), 0), routes: map[string]bool{}, sessions: map[string]PageSession{}, } } func (psm *PageSessionManager) GetName() string { return psm.routePath } func (psm *PageSessionManager) Wait() { psm.waiter.Wait() } func (psm *PageSessionManager) Stop() { psm.starter.Do(func() { psm.canceler() psm.waiter.Wait() }) } func (psm *PageSessionManager) Start() { psm.starter.Do(func() { 
psm.waiter.Add(1) go psm.manage() }) } type SessionStat struct { PageName string TotalSessions int Sessions map[string]time.Time } func (psm *PageSessionManager) Stat() (SessionStat, error) { var session = make(chan SessionStat, 1) psm.doFunc <- func() { var stat SessionStat stat.PageName = psm.routePath stat.Sessions = map[string]time.Time{} stat.TotalSessions = len(psm.sessions) for _, ss := range psm.sessions { stat.Sessions[ss.Id.String()] = ss.lastUsed } session <- stat } select { case <-psm.ctx.Done(): return SessionStat{}, nerror.WrapOnly(psm.ctx.Err()) case ss := <-session: return ss, nil } } // Retire returns closes a specific session using giving id. func (psm *PageSessionManager) Retire(sessionId string) error { var session = make(chan error, 1) psm.doFunc <- func() { if ss, hasSession := psm.sessions[sessionId]; hasSession { delete(psm.sessions, sessionId) ss.Page.Close() } session <- nil } select { case <-psm.ctx.Done(): return nerror.WrapOnly(psm.ctx.Err()) case <-session: return nil } } // Session returns a giving page for a giving sessionId. func (psm *PageSessionManager) Session(sessionId string) (*Page, error) { var session = make(chan *Page, 1) var errs = make(chan error, 1) psm.doFunc <- func() { var ss, hasSession = psm.sessions[sessionId] if !hasSession { errs <- nil return } session <- ss.Page } select { case <-psm.ctx.Done(): return nil, nerror.WrapOnly(psm.ctx.Err()) case err := <-errs: return nil, nerror.WrapOnly(err) case page := <-session: return page, nil } } // NewSession returns a new session page and session id. 
func (psm *PageSessionManager) NewSession(t sabuhp.Transport) (*Page, string, error) { var session = make(chan PageSession, 1) psm.doFunc <- func() { var ps PageSession ps.Id = nxid.New() ps.lastUsed = time.Now() ps.Page = psm.Creator(psm.routePath, psm.theme, t) psm.sessions[ps.Id.String()] = ps ps.Page.OnPageAdd(psm.manageAddPageRoute) session <- ps } select { case <-psm.ctx.Done(): return nil, "", nerror.WrapOnly(psm.ctx.Err()) case ss := <-session: return ss.Page, ss.Id.String(), nil } } func (psm *PageSessionManager) manageAddPageRoute(pageRoute string, p *Page) { psm.rl.Lock() if _, hasRoute := psm.routes[pageRoute]; hasRoute { psm.rl.Unlock() return } psm.routes[pageRoute] = true psm.rl.Unlock() psm.onAddRoute(pageRoute, p) } func (psm *PageSessionManager) manage() { defer psm.waiter.Done() var ticker = time.NewTicker(psm.idleCheckInterval) defer ticker.Stop() doLoop: for { select { case <-psm.ctx.Done(): return case doFn := <-psm.doFunc: doFn() case <-ticker.C: // clean house var nowTime = time.Now() for key, session := range psm.sessions { if nowTime.Sub(session.lastUsed) < psm.maxIdle { continue doLoop } delete(psm.sessions, key) session.Close() } } } } var _ sabuhp.TransportResponse = (*Pages)(nil) type OnPages func(route string, p *Pages) // Pages exists to provider an organization // around sessions and pages. // // It implements the http.Handler interface. 
// Pages routes incoming messages to per-route PageSessionManagers behind a
// shared mixer router, falling back to a configurable not-found handler.
type Pages struct {
	logger    Logger
	prefix    string
	theme     *styled.Theme
	router    *mixer.Mux
	maxIdle   time.Duration
	idleCheck time.Duration
	ctx       context.Context
	tr        sabuhp.Transport
	sl        sync.RWMutex       // guards managers
	waiter    sync.WaitGroup     // tracks per-manager cleanup goroutines
	managers  map[string]*PageSessionManager
	onNewPage *PageNotification
}

// WithPages builds a Pages set using the default idleness and sweep
// intervals; see NewPages for the fully parameterized constructor.
func WithPages(
	ctx context.Context,
	logger Logger,
	prefix string,
	theme *styled.Theme,
	transport sabuhp.Transport,
	notFound Handler,
) *Pages {
	return NewPages(
		ctx,
		logger,
		prefix,
		DefaultMaxPageIdleness,
		DefaultPageIdlenessChecksInterval,
		theme,
		transport,
		notFound,
	)
}

// NewPages builds a Pages set rooted at prefix. A nil notFound handler is
// replaced by DefaultNotFound. Unmatched routes are rendered through the
// notFound handler and wrapped into a reply message that echoes the caller's
// session id in both a header and a cookie.
func NewPages(
	ctx context.Context,
	logger Logger,
	prefix string,
	maxIdle time.Duration,
	idleCheck time.Duration,
	theme *styled.Theme,
	transport sabuhp.Transport,
	notFound Handler,
) *Pages {
	if notFound == nil {
		notFound = DefaultNotFound{}
	}
	return &Pages{
		theme:     theme,
		tr:        transport,
		prefix:    prefix,
		ctx:       ctx,
		logger:    logger,
		maxIdle:   maxIdle,
		idleCheck: idleCheck,
		onNewPage: NewPageNotification(),
		managers:  map[string]*PageSessionManager{},
		router: mixer.NewMux(mixer.MuxConfig{
			RootPath: prefix,
			NotFound: mixer.HandlerFunc(func(message *sabuhp.Message) (*sabuhp.Message, error) {
				var d Data
				d.Message = message
				d.Path = message.Path
				// Best-effort: carry over an existing session id if the
				// request has one; otherwise d.SessionId stays empty.
				if sessionId, sessionIdErr := getSessionId(message); sessionIdErr == nil {
					d.SessionId = sessionId
				}
				var payload, contentType, err = writeNode(message.ContentType, notFound.Handle(d))
				if err != nil {
					return nil, nerror.WrapOnly(err)
				}
				// NOTE(review): a not-found reply is sent with status 200;
				// confirm whether the transport layer expects 404 here.
				return &sabuhp.Message{
					MessageMeta: sabuhp.MessageMeta{
						Path:                message.Path,
						ContentType:         contentType,
						SuggestedStatusCode: 200,
						Headers: sabuhp.Header{
							HeaderSessionIdName: []string{d.SessionId},
						},
						Cookies: []sabuhp.Cookie{
							{
								Name:  QueryAndCookieSessionIdName,
								Value: d.SessionId,
							},
						},
					},
					FromAddr: prefix,
					ID:       nxid.ID{},
					Topic:    message.Path,
					Delivery: message.Delivery,
					Payload:  payload,
					Metadata: sabuhp.Params{},
					Params:   sabuhp.Params{},
				}, nil
			}),
		}),
	}
}

// Wait blocks until all per-manager cleanup goroutines have finished.
func (p *Pages) Wait() {
	p.waiter.Wait()
}

// PagesStat aggregates the session statistics of every registered page.
type PagesStat struct {
	TotalPages   int
	PageSessions map[string]SessionStat
}

// Stats returns current states of existing pages and their sessions.
// Managers whose Stat call fails are logged and skipped.
func (p *Pages) Stats() PagesStat {
	var totalPages int
	var stats = map[string]SessionStat{}
	var stack = njson.Log(p.logger)
	p.sl.RLock()
	totalPages = len(p.managers)
	for page, manager := range p.managers {
		stat, err := manager.Stat()
		if err != nil {
			stack.New().LError().Error("error", err).
				Message("failed to get stat for page").
				String("page", page).
				End()
			continue
		}
		stats[page] = stat
	}
	p.sl.RUnlock()
	var stat PagesStat
	stat.PageSessions = stats
	stat.TotalPages = totalPages
	return stat
}

// Get returns the PageSessionManager registered for the given page name,
// checking both the prefixed and unprefixed forms of the route.
func (p *Pages) Get(pageName string) (*PageSessionManager, error) {
	var pageHandle = cleanAllSlashes(handlePath(pageName))
	var prefixPage = cleanAllSlashes(handlePath(p.prefix, pageName))
	p.sl.RLock()
	var manager, exists = p.managers[prefixPage]
	if !exists {
		manager, exists = p.managers[pageHandle]
	}
	p.sl.RUnlock()
	if !exists {
		return nil, nerror.New("not found")
	}
	return manager, nil
}

// Has reports whether a manager is registered for the given page name,
// under either its prefixed or unprefixed route form.
func (p *Pages) Has(pageName string) bool {
	var pageHandle = cleanAllSlashes(handlePath(pageName))
	var prefixPage = cleanAllSlashes(handlePath(p.prefix, pageName))
	p.sl.RLock()
	if _, exists := p.managers[prefixPage]; exists {
		p.sl.RUnlock()
		return true
	}
	if _, exists := p.managers[pageHandle]; exists {
		p.sl.RUnlock()
		return true
	}
	p.sl.RUnlock()
	return false
}

// Add registers a new PageCreator under the given page name.
// It returns an error when a manager already exists for the route.
// Add creates and starts a PageSessionManager for pageName, wires it into
// the router under both the exact route and a "*path" wildcard, and arranges
// for the manager to be deregistered once it terminates.
func (p *Pages) Add(pageName string, creatorFunc PageCreator) error {
	var prefixPage = cleanAllSlashes(handlePath(p.prefix, pageName))
	var routerPath = cleanAllSlashes(handlePath(pageName))
	var routerPathForMore = cleanAllSlashes(handlePath(p.prefix, pageName, "*path"))
	// Has re-applies the prefix internally; the already-prefixed route is
	// still matched through Has's unprefixed lookup branch.
	if p.Has(prefixPage) {
		return nerror.New("already exists")
	}
	var manager = NewPageSessionManager(p.ctx, prefixPage, p.maxIdle, p.idleCheck, creatorFunc, p.theme, p.onNewPage.Emit)
	manager.Start()
	p.waiter.Add(1)
	// Remove the manager from the registry once its event loop exits.
	go func() {
		defer p.waiter.Done()
		manager.Wait()
		p.sl.Lock()
		delete(p.managers, prefixPage)
		p.sl.Unlock()
	}()
	p.sl.Lock()
	p.managers[prefixPage] = manager
	p.sl.Unlock()
	var handler = createHandler(prefixPage, manager, p.tr)
	p.router.Serve(routerPath, handler)
	p.router.Serve(routerPathForMore, handler)
	// Announce the new route; no concrete Page instance exists yet.
	p.onNewPage.Emit(prefixPage, nil)
	return nil
}

// AddOnPageRoute registers a callback fired whenever a new page route is
// announced; the callback receives this Pages set instead of the Page.
func (p *Pages) AddOnPageRoute(cb OnPages) {
	p.onNewPage.Add(func(route string, _ *Page) {
		cb(route, p)
	})
}

// Handle routes the message through the mixer router and sends the reply
// back over the transport, broadcasting when the message requests SendToAll.
func (p *Pages) Handle(message *sabuhp.Message, tr sabuhp.Transport) sabuhp.MessageErr {
	var reply, err = p.router.ServeRoute(message)
	if err != nil {
		return sabuhp.WrapErr(err, false)
	}
	var sendErr error
	if reply.Delivery == sabuhp.SendToAll {
		sendErr = tr.SendToAll(reply, -1)
	} else {
		sendErr = tr.SendToOne(reply, -1)
	}
	if sendErr != nil {
		return sabuhp.WrapErr(sendErr, false)
	}
	return nil
}

// createHandler adapts a PageSessionManager into a mixer.Handler: it
// resolves (or lazily creates) the caller's session, renders the page, and
// wraps the rendered payload in a reply carrying the session id as both a
// header and a cookie.
func createHandler(pagePath string, manager *PageSessionManager, tr sabuhp.Transport) mixer.Handler {
	return mixer.HandlerFunc(func(message *sabuhp.Message) (*sabuhp.Message, error) {
		var d Data
		d.Message = message
		d.Path = message.Path
		if sessionId, sessionIdErr := getSessionId(message); sessionIdErr == nil {
			d.SessionId = sessionId
		}
		var page *Page
		var sessionErr error
		// Reuse an existing session when possible; otherwise fall back to
		// creating a fresh one (this also covers requests with no id).
		page, sessionErr = manager.Session(d.SessionId)
		if sessionErr != nil {
			page, d.SessionId, sessionErr = manager.NewSession(tr)
		}
		if sessionErr != nil {
			return nil, nerror.WrapOnly(sessionErr)
		}
		var renderNode = page.Render(d)
		var payload, contentType, writeErr = writeNode(message.ContentType, renderNode)
		if writeErr != nil {
			return nil, nerror.WrapOnly(writeErr)
		}
		return &sabuhp.Message{
			MessageMeta: sabuhp.MessageMeta{
				Path:                message.Path,
				ContentType:         contentType,
				SuggestedStatusCode: 200,
				Headers: sabuhp.Header{
					HeaderSessionIdName: []string{d.SessionId},
				},
				Cookies: []sabuhp.Cookie{
					{
						Name:  QueryAndCookieSessionIdName,
						Value: d.SessionId,
					},
				},
			},
			ID:       nxid.ID{},
			Topic:    message.Path,
			FromAddr: pagePath,
			Delivery: message.Delivery,
			Payload:  payload,
			Metadata: sabuhp.Params{},
			Params:   sabuhp.Params{},
		}, nil
	})
}

// writeNode serializes the rendered DOM node in the representation implied
// by contentType and returns the bytes together with the content type label
// actually used. Unknown content types (and VoidHTML) render as plain HTML.
func writeNode(contentType string, renderNode *domu.Node) ([]byte, string, error) {
	var content string
	var renderedOutput = bytes.NewBuffer(make([]byte, 0, 512))
	switch contentType {
	case VoidHTMLDiff:
		content = VoidHTMLDiff
		if renderErr := RenderVoidHTMLDiff(renderNode, renderedOutput); renderErr != nil {
			return nil, "", nerror.WrapOnly(renderErr)
		}
	case VoidJSON:
		content = VoidJSON
		if renderErr := RenderVoidJSON(renderNode, renderedOutput); renderErr != nil {
			return nil, "", nerror.WrapOnly(renderErr)
		}
	case VoidJSONStream:
		content = VoidJSONStream
		if renderErr := RenderVoidJSONStream(renderNode, renderedOutput); renderErr != nil {
			return nil, "", nerror.WrapOnly(renderErr)
		}
	case VoidHTML:
		// VoidHTML deliberately shares the default branch; note the reply
		// is labeled PlainHTML, not VoidHTML.
		fallthrough
	default:
		content = PlainHTML
		if renderErr := RenderVoidHTML(renderNode, renderedOutput); renderErr != nil {
			return nil, "", nerror.WrapOnly(renderErr)
		}
	}
	return renderedOutput.Bytes(), content, nil
}

// getSessionId extracts the session id from the message, preferring the
// query parameter over the header; it errors when neither is present.
func getSessionId(message *sabuhp.Message) (string, error) {
	var sessionIdFromQuery = strings.TrimSpace(message.Query.Get(QueryAndCookieSessionIdName))
	var sessionIdFromHeader = strings.TrimSpace(message.Headers.Get(HeaderSessionIdName))
	var isASession = len(sessionIdFromHeader) != 0 || len(sessionIdFromQuery) != 0
	if !isASession {
		return "", nerror.New("not a session")
	}
	if len(sessionIdFromQuery) != 0 {
		return sessionIdFromQuery, nil
	}
	return sessionIdFromHeader, nil
}
// GetTextChannel Returns a channel from which the text of a file is exported func GetTextChannel(filepath string) chan string { channel := make(chan string, 100) go getText(filepath, channel) return channel }
<reponame>xcomponent/ReactiveXComponent.js export class Utils { public static removeElementFromArray<T>(array: Array<T>, element: T): void { const index = array.indexOf(element); if (index > -1) { array.splice(index, 1); } else { throw new Error('Element to remove not found'); } } }
Mulberry-Inspired Nickel-Niobium Phosphide on Plasma-Defect-Engineered Carbon Support for High-Performance Hydrogen Evolution. Bimetallic phosphate electrocatalysts on carbon-cloth support are among the most promising industry-relevant solutions for electrocatalytic hydrogen production. To address the persistent issue of hetero-phase interfacing on carbon support while ensuring high activity and stability, a low-cost, high-performance hydrogen evolution reaction (HER) electrocatalyst is developed. Bi-phase Ni12 P5 -Ni4 Nb5 P4 nanocrystals with rich heterointerfaces and phase edges are successfully fabricated on carbon cloth (CC), which is enabled by intentional defect creation by atmospheric pressure dielectric barrier discharge (DBD) plasma (PCC). The obtained Ni12 P5 -Ni4 Nb5 P4 /PCC electrocatalyst exhibits excellent HER performance, heralded by the low overpotentials of 81 and 287 mV for delivering current densities of 10 (j10 ) and 500 (j500 ) mA cm-2 , respectively. Meanwhile, the Ni12 P5 -Ni4 Nb5 P4 /PCC maintains spectacular catalytic activity at high current density region (>j615 ), which outperformed the industry-relevant benchmark Pt/C/PCC catalyst. The catalyst grown on the plasma-treated support shows remarkably longer operation and ultra-stable electrocatalytic characteristics over 100 h continuous operation. Ab initio numerical simulations reveal that Ni atoms exposed in the heterointerfaces act as the main catalytically active centers for HER.
Quantification of the Influence of Deep Basin Effects on Structural Collapse Using SCEC CyberShake Earthquake Ground Motion Simulations This paper examines the effects of earthquake ground motions in deep sedimentary basins on structural collapse risk using physics-based earthquake simulations of the Los Angeles basin developed through the Southern California Earthquake Center's CyberShake project. Distinctive waveform characteristics of deep basin seismograms are used to classify the ground motions into several archetype groups, and the damaging influence of the basin effects are evaluated by comparing nonlinear structural responses under spectrum and significant duration equivalent basin and nonbasin ground motions. The deep basin ground motions are observed to have longer period-dependent durations and larger sustained spectral intensities than nonbasin motions for vibration periods longer than about 1.5 s, which can increase structural collapse risk by up to 20% in ground motions with otherwise comparable peak spectral accelerations and significant durations. Two new metrics are proposed to quantify period-dependent duration effects that are not otherwise captured by conventional ground motion intensity measures. The proposed sustained amplitude response spectra and significant duration spectra show promise for characterizing the damaging effects of long duration features of basin ground motions on buildings and other structures.
Adverse Outcomes of Interrupted Precordial Compression During Automated Defibrillation Background—Current versions of automated external defibrillators (AEDs) require frequent stopping of chest compression for rhythm analyses and capacity charging. The present study was undertaken to evaluate the effects of these interruptions during the operation of AEDs. Methods and Results—Ventricular fibrillation was electrically induced in 20 male domestic swine weighing between 37.5 and 43 kg that were untreated for 7 minutes before CPR was started. Defibrillation was attempted with up to 3 sequential 150-J biphasic shocks, but each was preceded by 3-, 10-, 15-, or 20-second interruptions of chest compression. The interruptions corresponded to those that were mandated by commercially marketed AEDs for rhythm analyses and capacitor charge. The sequence of up to 3 electrical shocks and delays were repeated at 1-minute intervals until the animals were successfully resuscitated or for a total of 15 minutes. Spontaneous circulation was restored in each of 5 animals in which precordial compression was delayed for 3 seconds before the delivery of the first and subsequent shocks but in none of the animals in which the delay was >15 seconds before the delivery of the first and subsequent shocks. Longer intervals of CPR interventions were required, and there was correspondingly greater failure of resuscitation in close relationship to increasing delays. The durations of interruptions were inversely related to the durations of subthreshold levels of coronary perfusion pressure. Postresuscitation arterial pressure and left ventricular ejection fraction were more severely impaired with increasing delays. Conclusions—Interruptions of precordial compression for rhythm analyses that exceed 15 seconds before each shock compromise the outcome of CPR and increase the severity of postresuscitation myocardial dysfunction.
/**
 * Performs cleanup of all statically referenced data used by Mountiplex.
 * Every registered unloader is executed under the unloaders lock, after
 * which the list is emptied and shrunk so the memory can be reclaimed,
 * allowing proper garbage collection and re-loading.
 */
public static void unloadMountiplex() {
    synchronized (unloaders) {
        unloaders.forEach(Runnable::run);
        unloaders.clear();
        unloaders.trimToSize();
    }
}
# SPDX-License-Identifier: BSD-3-Clause
# Copyright Contributors to the OpenDeliveryFormat Project.


class ODFFileError(IOError):
    """Raised for file-reading problems encountered within ODF."""


class ODFValidationError(Exception):
    """Raised when ODF encounters invalid information."""
def to_top(self):
    """Set the 'Top' property to an empty string.

    Presumably anchors/moves this element to the top — the exact effect
    depends on the host class's ``set()`` implementation (confirm there).
    """
    self.set('Top', '')
Yesterday, HTC updated their HTC Camera application for devices with HTC Sense. Normally, this isn't worth discussing, but the changelog had an entry for RAW photography mode on the One M9. For those that are unfamiliar with what a RAW photo is, in essence it's exactly what would come to mind when it comes to something that is "raw". The file effectively encodes the sensor's data at each pixel, without any loss or significant processing. Normally, this file is completely hidden from the user, and once it enters the image signal processor (ISP) it is only used to produce the final JPEG and is presumably deleted once the JPEG file is saved to internal storage. As a result, we end up with a file that can be easily read by any computer, but is effectively impossible to change in a meaningful way. A RAW/digital negative can be easily changed to look almost nothing like the original image, but is impossible for most applications to view properly. Fundamentally, a RAW photo is an unprocessed image, and if you could see the RAW image without demosaicing to handle the camera's pixel layout, it would look almost nothing like the final image. However, it's this relatively unprocessed state that allows for potential gains in image quality. After all, the ISP must determine how to process an image without human intervention in an effective manner, and it also must process and save the image within about a third of a second or less. Comparatively speaking, if I process it myself I might spend a solid 5-10 minutes playing with sliders to get a good balance between noise, detail, contrast, and dynamic range, and each image requires a second or two of maximum CPU load on a modern Core i7 4702HQ. The real question is whether RAW photos can make a difference in a camera's output. In my experience this does depend quite a bit. Something like the Nexus 5 didn't really see any additional benefit from hand-processed photos. 
However, the One M9's RAW photo mode reveals a pretty stunning difference in photo quality when properly processed. In order to show it off, I took a few quick comparison shots. Our first comparison is a daytime shot, which is only directly comparable between the M9's JPEG and RAW output. iPhone 6 JPEG M9 RAW M9 We'll start with a very simple daytime comparison. Please note that for this shot it is only comparable between the M9 RAW and JPEG output, as the iPhone 6 shot is from the same place, but at a different time of day and multiple weeks apart. In the original review, I mentioned that the One M9 seemed to have detail equivalent to the iPhone 6 with its 8MP camera in daytime. This single test shows the massive improvements that can be found with some quick post-processing in Lightroom, with special emphasis on the improved shadow detail in the bottom left side of the photo, the dramatically improved detail on the building to the top left of the photo, and improved detail on pretty much any tree in this photo. There is a noticeable grain to the photo that isn't as pronounced on the ISP-processed JPEG, but the noise is simply masked by blurring rather than actually getting canceled out. Again, it's important to note that the iPhone 6 photo isn't quite at the same angle or from the same time as the One M9, but it should serve as a good reference for objects in the scene. The real test is in low light, and for this I brought some additional phones and a tripod to try and get a test that was better-controlled, although this isn't a controlled test in the strictest sense to begin with. JPEG M9 RAW M9 iPhone 6 Plus Galaxy S6 In this test, the One M9's native JPEG output is by far the worst here. The processing is surprisingly poor, with generous amounts of color noise on mostly flat, yellow-white walls. As color noise is the most distracting type of noise in a photo, it's generally better to err towards strong rather than weak color noise reduction. 
The RAW processed in Lightroom by comparison is miles ahead. It's probably not better than the iPhone 6 Plus here, as things like the rough stone walls lining the steps are extremely hard to make out in comparison to the iPhone 6 Plus. The Galaxy S6 is easily the brightest of all, but it seems that Samsung might have gone a bit too far as detail along the steps is definitely worse than the iPhone 6 Plus and there is much more noise in some parts of the photo. However, the Galaxy S6 manages to equal, if not better the iPhone 6 Plus in this shot due to the sheer amount of additional detail captured from the better exposure; the resulting noise ultimately is a liability in large, HiDPI displays or in prints, but it isn't really visible when scaled to web/phone sizes. JPEG M9 RAW M9 iPhone 6 Plus Galaxy S6 Xiaomi Mi Note Once again, the native JPEG output on the M9 is rather poor. Strong luminance noise reduction turns the picture into an oil painting when viewed at 100% crop, yet there's significant color and luminance noise still visible throughout the photo when scaled to web resolutions. There's almost no detail to speak of either due to the noise reduction used. Probably the best example of these issues is the trash can on the left side of the photo. Viewing the native JPEG shows a sign with nothing on it, but viewing the RAW-processed photo actually reveals that it is a recycling bin. Once again, even with RAW processing I didn't manage to get the M9 to beat the iPhone 6 Plus, but given the lack of OIS it shouldn't be too much of a surprise. Strangely enough, the RAW doesn't have enough contrast to show the red sign in the distance, which shows up just fine on the native JPEG and on every other shot. Meanwhile the Galaxy S6 is once again easily the brightest in all of these shots, but it looks like ISO is a bit too high and the processing is also a bit too aggressive as the foliage in the scene is clearly not as cleanly rendered as it is on the iPhone 6 Plus.
At web resolutions it is easily a match for the iPhone 6 Plus in these conditions as the noise isn't as visible when viewed at such a scaled resolution, while the better exposure helps with improved detail in the dark areas of the scene. The Xiaomi Mi Note does reasonably well here, but on 100% crop the noise reduction is quite strong and reduces visible detail. Overall, HTC should be applauded for bringing RAW photo mode to the One M9. For anyone that has bought one, they can still take some truly good photos if they're willing to learn how to use a tool like Lightroom or Photoshop's RAW processing tools. However, there is no lens correction profile available, so vignetting and other distortions are a noticeable problem as seen in these photos. It's possible to try and compensate for these problems by eye like I did for these test shots, but HTC should really be releasing their own lens correction profile if they really want to support serious photography with the One M9. As for the JPEG image quality situation, the fact that it's possible to get so much improvement in image quality from processing a RAW by hand compared to the JPEG output is a very real problem for HTC. Now admittedly a hand-touched RAW will almost always be better than a quickly processed JPEG — and we went into this expecting as much — but such a large difference, and at times significant loss of detail, indicates that HTC is losing too much quality with their JPEG output. Compared to their competitors, the end result is an image with worse quality than what I believe they could have otherwise achieved. Ultimately I'm not interested in pointing fingers here, but the output simply isn't competitive. It's possible that the ISP simply isn't up to scratch, as there are some very real problems with auto-focus speed and reliability as I noted in the original review.
It's also possible that HTC is electing to use an extremely aggressive post-processing combined with strong JPEG compression to reduce file sizes. This is simply a problem that has to be fixed if HTC is determined to remain relevant in the hyper-competitive smartphone space. The one truth that is clear to me here is that the sensor is far from the main problem here. A better sensor might help with high ISO noise, and PDAF should definitely help with focus speed, but in the case of the One M9's rear camera the biggest issues seem to be poor post-processing, a lack of high-quality optical image stabilization (OIS), and a smaller aperture compared to other phones on the market in that order. A better sensor can and will help, but it's far from a silver bullet. This does mean that there's some hope for the One M9's camera though, as most of the problems I've described could be resolved with an OTA update. The question now is whether HTC will follow through.
class Segments:
    """Class representing an `Adafruit 14-segment AlphaNumeric FeatherWing
    <https://www.adafruit.com/product/3139>`_. Automatically uses the feather's I2C bus."""

    def __init__(self):
        # Underlying display driver; expected to be assigned elsewhere
        # (None until then — the methods below assume it has been set).
        self._segments = None

    def print(self, value):
        """
        Print a number or text to the display

        :param value: The text or number to display
        :type value: str or int or float
        """
        self._segments.print(value)
        self._segments.show()

    def marquee(self, text, delay=0.25, loop=True):
        """
        Automatically scroll the text at the specified delay between characters

        :param str text: The text to display
        :param float delay: (optional) The delay in seconds to pause before scrolling
                            to the next character (default=0.25)
        :param bool loop: (optional) Whether to endlessly loop the text (default=True)
        """
        # Non-string input is silently ignored (no error raised).
        if isinstance(text, str):
            self.fill(False)
            if loop:
                # Endless loop: this call never returns until interrupted.
                while True:
                    self._scroll_marquee(text, delay)
            else:
                self._scroll_marquee(text, delay)

    def _scroll_marquee(self, text, delay):
        """
        Scroll through the text string once using the delay
        """
        char_is_dot = False
        for character in text:
            self._segments.print(character)
            # Add delay if character is not a dot or more than 2 in a row,
            # so "..." sequences render without triple-pausing the scroll.
            if character != '.' or char_is_dot:
                sleep(delay)
            char_is_dot = (character == '.')
            self._segments.show()

    def fill(self, fill):
        """Change all Segments on or off

        :param bool fill: True turns all segments on, False turns all segments off
        :raises ValueError: if ``fill`` is not a bool
        """
        if isinstance(fill, bool):
            # Driver API takes 1/0 rather than True/False.
            self._segments.fill(1 if fill else 0)
            self._segments.show()
        else:
            raise ValueError('Must set to either True or False.')

    @property
    def blink_rate(self):
        """
        Blink Rate returns the current rate that the text blinks.
        0 = Off
        1-3 = Successively slower blink rates
        """
        return self._segments.blink_rate

    @blink_rate.setter
    def blink_rate(self, rate):
        # Forwarded unchecked to the underlying driver.
        self._segments.blink_rate = rate

    @property
    def brightness(self):
        """
        Brightness returns the current display brightness.
        0-15 = Dimmest to Brightest Setting
        """
        return self._segments.brightness

    @brightness.setter
    def brightness(self, brightness):
        # Forwarded unchecked to the underlying driver.
        self._segments.brightness = brightness
/*
 * scf_decode32() is an implementation of Base32 decoding as described in
 * section 6 of RFC 4648 - "The Base16, Base32, and Base64 Data
 * Encodings". See http://www.ietf.org/rfc/rfc4648.txt?number=4648. The
 * input stream is divided into groups of 8 encoded characters. Each
 * encoded character represents 5 bits of data. Thus, the 8 encoded
 * characters are used to produce 40 bits or 5 bytes of unencoded data in
 * outbuf.
 *
 * If the encoder did not have enough data to generate a multiple of 8
 * characters of encoded data, it used a pad character to get to the 8
 * character boundary. The standard specifies that the pad character is '='.
 * Unfortunately, '=' is not a legal character in SMF property names.
 * Thus, the caller can specify an alternate pad character with the pad
 * argument. If pad is 0, scf_decode32() will use '='. Note that use of
 * anything other than '=' is not in conformance with RFC 4648. It is
 * suitable, however, for internal use of SMF software. When the encoded
 * data is used in SMF property names, SCF_ENCODE32_PAD should be used as
 * the pad character.
 *
 * Arguments:
 *	in	- Buffer of encoded characters.
 *	inlen	- Number of characters at in.
 *	outbuf	- Buffer to receive the decoded bytes. It can be the
 *		  same buffer as in.
 *	outmax	- Size of the buffer at outbuf.
 *	outlen	- If it is not NULL, outlen receives the number of
 *		  bytes placed in output.
 *	pad	- Alternate padding character.
 *
 * Returns:
 *	0	Buffer was successfully decoded.
 *	-1	Indicates an invalid input character, output buffer too
 *		small, or pad is one of the standard encoding characters.
 */
int
scf_decode32(const char *in, size_t inlen, char *outbuf, size_t outmax,
    size_t *outlen, char pad)
{
    char *bufend = outbuf + outmax;
    char c;
    uint_t count;           /* valid (non-pad) characters in this group */
    uint32_t g[DECODE32_GS];    /* 5-bit values of one 8-character group */
    size_t i;
    uint_t j;
    char *out = outbuf;
    boolean_t pad_seen = B_FALSE;

    if (pad == 0) {
        pad = '=';
    } else {
        /* A custom pad must not collide with any encoding character. */
        for (i = 0; i < sizeof (base32) - 1; i++) {
            if (pad == base32[i])
                return (-1);
        }
    }

    i = 0;
    while ((i < inlen) && (out < bufend)) {
        /* Gather one group of up to DECODE32_GS encoded characters. */
        for (j = 0, count = 0; (j < DECODE32_GS) && (i < inlen); i++) {
            c = in[i];
            /* Line breaks are permitted and skipped. */
            if ((c == '\r') || (c == '\n'))
                continue;
            /* Once padding starts, only pad characters may follow. */
            if ((pad_seen == B_TRUE) && (c != pad)) {
                return (-1);
            }
            /* Reject characters outside the decode table's range. */
            if ((c < 0) || (c >= sizeof (index32))) {
                return (-1);
            }
            if (c == pad) {
                pad_seen = B_TRUE;
                continue;
            }
            /* 0xff in index32 marks an illegal encoding character. */
            if ((g[j++] = index32[c]) == 0xff) {
                return (-1);
            }
            count++;
        }

        /*
         * Repack the 5-bit values into bytes. The count thresholds
         * (2, 4, 5, 7, 8) are the minimum number of encoded
         * characters needed to fully determine each output byte.
         */
        if ((count >= 2) && (out < bufend)) {
            *out++ = (g[0] << 3) | ((g[1] >> 2) & 0x7);
        }
        if ((count >= 4) && (out < bufend)) {
            *out++ = (g[1] << 6) | (g[2] << 1) | \
                ((g[3] >> 4) & 0x1);
        }
        if ((count >= 5) && (out < bufend)) {
            *out++ = (g[3] << 4) | ((g[4] >> 1) & 0xf);
        }
        if ((count >= 7) && (out < bufend)) {
            *out++ = (g[4] << 7) | (g[5] << 2) | ((g[6] >> 3) & 0x3);
        }
        if ((count == 8) && (out < bufend)) {
            *out++ = (g[6] << 5) | g[7];
        }
    }
    /* Input remained after the output buffer filled up. */
    if (i < inlen) {
        return (-1);
    }
    if (outlen)
        *outlen = out - outbuf;
    /* NUL-terminate if there is room (not counted in *outlen). */
    if (out < bufend)
        *out = 0;
    return (0);
}
The beauty of the Taj Mahal is overshadowed by some of the world's ugliest poverty. Credit:AP It is against this backdrop that generations of western travellers have sought the authentic Indian experience. But with most taking the same well-worn route, the fast-paced, seemingly unregulated, and often unforgiving tourism industry that has sprung up in response means they get anything but. From Delhi's infamous dodgy travel agents to train station clerks who demand an extra "tax," to the rickshaw drivers who scoop huge commissions for delivering green tourists to waiting shopkeepers, the scams come thick and fast. Falling for them is a rite of passage. Indeed, I fell for more than one and I'm mostly okay with that. Why shouldn't struggling locals make as much from us westerners as they can? After all, no matter how tight our travel budget, we are always going to come out of it the better off. Moreover, given all the wealth the West has taken from the subcontinent over the past couple of centuries, I am the last to begrudge some of them reclaiming it, rupee by rupee. When you are in the thick of it, however, it can feel demoralising. Every conversation seems burdened by the weight of an expected transaction, including for solo females, a sexual one. A few days of this and you start to feel like the losing player in a game where the aim is to separate you from all your money as quickly as possible. As one young Welsh man I met said sadly, "I feel like a walking cash machine." All of which means that, on the tourist trail at least, the chances of a genuine connection seems impossible. My frustrated attempts at honest conversations with local shopkeepers and street vendors I spoke to led either to a lighter wallet or inappropriate sexual invitations or both. Not that meaningful encounters with local hawkers is what all tourists are after. But, in the relatively short time I was there, I was never quite clear on what it is we are after.
Is travelling really only about ticking off the major attractions, avoiding the beggars, and insisting shopkeepers knock the equivalent of an extra $1 from the price of harem pants that are marketed as Indian but sold exclusively to tourists? To be clear; I do not blame Indians for this. Rather, I suspect our enduring love of "doing India" appears to have fomented a situation where enterprising and desperate locals seek to profit by giving us what they think we want. But what we get is an imitation of the real thing. It feels exploitative to be a part of this, to waltz in from another, wealthier country, get ushered into the famous sites, hang out with other westerners, and then waltz away again. On to the next city, the next attraction, the next "experience." Despite all the work I do trying to undermine it, I too am a beneficiary of white, western privilege. Of course, this state of affairs is not limited to India. Perhaps it's the fact I am no longer a wide-eyed backpacker on my first Big Trip, or my anti-colonialist politics talking here (and colonialism has certainly left its mark here), but it feels as though in India, all pretence is abandoned and what we are left with is the stark, ugly reality of privilege and inequality. By the start of my fifth and final week, it dawned on me that, to all intents and purposes, in India I am a "white person." Disheartened at how often western tourists were approached by locals for selfies (the whiter-looking, the more their image is sought), at first I had the luxury of distancing myself from these particular interactions, of kidding myself I had more in common with the locals than the white tourists they seemed to idolise. But when, in the absence of an actual European-descended tourist, I was also asked to pose in family happy snaps, it became uncomfortably clear to me the lens through which I was viewed. 
Certainly, the compliments on my "fair" skin and warnings not to spend too much time in the sun or risk becoming "dark like us" were meant to flatter. But they sent a brutal message that, despite all the work I do trying to undermine it, I too am a beneficiary of white, western privilege. From walking the streets on my own, to the thousands of rupees I withdrew from the ATM without blinking, to the bikini I wore by the lake in the tiny southern town of Hampi, where tourists sunbake on ancient ethereal boulders while astonished Indian men gather to gawk at the unusual antics of "white people"; all of these signalled a status and privilege that, relative to much of the local population, I had in abundance.
/*! Copyright (c) 2021, XAPPmedia */
import { RequestSlotMap, RequestSlotValues } from "stentor-models";
import { requestSlotValueToString } from "./requestSlotValueToString";

/**
 * Returns the slot value, if not found it returns undefined.
 *
 * @param slots Map of slot name to slot object (may itself be undefined/null)
 * @param name Name of the slot to look up
 * @returns The slot's value, or undefined when the map or slot is missing
 */
export function getSlotValue(slots: RequestSlotMap, name: string): RequestSlotValues | undefined {
    if (!slots) {
        return undefined;
    }
    const slotEntry = slots[name];
    // slotEntry is the slot object itself (not the raw value), so a plain
    // truthiness check is only distinguishing "present" from "missing".
    return slotEntry ? slotEntry.value : undefined;
}

/**
 * From the provided slot map, it returns the slot value as a string or an empty string
 * if it doesn't find the value.
 *
 * @param slots Map of slot name to slot object
 * @param name Name of the slot to look up
 */
export function slot(slots: RequestSlotMap, name: string): string {
    const value = getSlotValue(slots, name);
    // Fix: the previous `value ? ... : ""` dropped legitimate falsy slot
    // values such as 0 or false; only truly-missing values yield "".
    return value !== undefined && value !== null ? requestSlotValueToString(value) : "";
}
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.PrintWriter;
import java.io.Reader;
import java.util.Arrays;
import java.util.Comparator;
import java.util.StringTokenizer;
import java.util.TreeSet;

// Codeforces Round #299 (Div. 2)
// Tavas and Pashmaks
// Reads n (x, y) pairs and prints the 1-based indices of all points that lie
// on a convex-hull-like boundary computed via a Graham-scan-style sweep,
// including duplicates of hull points.
public class Problem_535_E {

    public static void main(String[] args) {
        InputStream inputStream = System.in;
        OutputStream outputStream = System.out;
        PrintWriter out = new PrintWriter(outputStream);
        Problem_535_E solver = new Problem_535_E();
        solver.solve(1, new InputStreamReader(inputStream), out);
        out.close();
    }

    /** Simple comparable (first, second) integer pair used for de-duplication in a TreeSet. */
    static class Pair implements Comparable<Pair> {
        int first;
        int second;

        Pair(int f, int s) {
            first = f;
            second = s;
        }

        public int compareTo(Pair o) {
            // Lexicographic order: first, then second.
            // NOTE(review): subtraction-based compare can overflow for extreme
            // int inputs; fine only if coordinates are bounded — confirm constraints.
            int r = first - o.first;
            if(r == 0) r = second - o.second;
            return r;
        }
    }

    /**
     * Orientation-style test for points a, b, c. The formula is NOT a plain 2-D
     * cross product — it mixes coordinates multiplicatively, specific to this
    * problem's transformed space. Only its sign (and zero) is used.
     */
    long vec(int a, int b, int c) {
        return 1l * X[c] * Y[a] * (X[b] - X[a]) * (Y[c] - Y[b]) - 1l * X[a] * Y[c] * (Y[b] - Y[a]) * (X[c] - X[b]);
    }

    // Point coordinates, filled in solve().
    int[] X;
    int[] Y;

    public void solve(int test, Reader input, PrintWriter out) {
        MyScanner in = new MyScanner(input);
        int n = in.nextInt();
        X = new int[n];
        Y = new int[n];
        TreeSet<Pair> used = new TreeSet<Pair>();
        for (int i = 0; i < n; i++) {
            X[i] = in.nextInt();
            Y[i] = in.nextInt();
        }
        // Keep one representative index per distinct (x, y) point.
        int apt = 0;
        Integer[] A = new Integer[n];
        for (int i = 0; i < n; i++) {
            Pair p = new Pair(X[i], Y[i]);
            if (used.contains(p))
                continue;
            else {
                A[apt++] = i;
                used.add(p);
            }
        }
        A = Arrays.copyOf(A, apt);
        // pos: index (into A) of the point maximal by (y, then x);
        // neg: index of the point maximal by (x, then y) — the sweep's endpoint.
        int pos = 0;
        int neg = 0;
        for (int i = 1; i < A.length; i++) {
            if (Y[A[i]] > Y[A[pos]] || (Y[A[i]] == Y[A[pos]] && X[A[i]] > X[A[pos]]))
                pos = i;
            if (X[A[i]] > X[A[neg]] || (X[A[i]] == X[A[neg]] && Y[A[i]] > Y[A[neg]]))
                neg = i;
        }
        neg = A[neg];
        // Move the starting point to A[0], then sort the rest around it.
        int t = A[0];
        A[0] = A[pos];
        A[pos] = t;
        pos = A[0];
        final Integer[] finalA = A;
        Arrays.sort(A, 1, A.length, new Comparator<Integer>() {
            @Override
            public int compare(Integer a, Integer b) {
                // Primary key: orientation relative to the start point.
                long v = vec(finalA[0], a, b);
                if (v != 0)
                    return Long.compare(v, 0);
                else {
                    // Collinear tie-break: squared distance to the start point.
                    // NOTE(review): computed in int, which can overflow for
                    // coordinates near 2^15.5 and above — confirm the problem's
                    // coordinate bounds make this safe.
                    int da = (X[a] - X[finalA[0]]) * (X[a] - X[finalA[0]]) + (Y[a] - Y[finalA[0]]) * (Y[a] - Y[finalA[0]]);
                    int db = (X[b] - X[finalA[0]]) * (X[b] - X[finalA[0]]) + (Y[b] - Y[finalA[0]]) * (Y[b] - Y[finalA[0]]);
                    return Long.compare(da, db);
                }
            }
        });
        // Graham-scan-style stack sweep from pos to neg, popping right turns.
        int[] H = new int[A.length];
        int pt = 0;
        H[pt++] = A[0];
        if (pos != neg) {
            for (int i = 1; i < A.length; i++) {
                int p = A[i];
                while (true) {
                    if (pt < 2)
                        break;
                    int a = H[pt - 2];
                    int b = H[pt - 1];
                    long v = vec(a, b, p);
                    if (v > 0)
                        --pt;   // pop: (a, b, p) is a "bad" turn
                    else
                        break;
                }
                H[pt++] = p;
                if (p == neg)
                    break;  // reached the sweep endpoint
            }
        }
        // Report every input index (1-based) whose point is on the resulting chain,
        // including duplicates of the same coordinate pair.
        Arrays.sort(H, 0, pt);
        used.clear();
        for (int i = 0; i < pt; i++)
            used.add(new Pair(X[H[i]], Y[H[i]]));
        int cnt = 0;
        for (int i = 0; i < n; i++) {
            if (used.contains(new Pair(X[i], Y[i]))) {
                if(cnt++ > 0) out.print(' ');
                out.printf("%d", i + 1);
            }
        }
        out.println();
    }

    //-----------MyScanner class for faster input----------
    // Lightweight tokenizing scanner over a Reader (faster than java.util.Scanner).
    public static class MyScanner {
        BufferedReader br;
        StringTokenizer st;

        public MyScanner(Reader reader) {
            br = new BufferedReader(reader);
        }

        String next() {
            // Refill the tokenizer from the next line whenever it runs dry.
            while (st == null || !st.hasMoreElements()) {
                try {
                    st = new StringTokenizer(br.readLine());
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
            return st.nextToken();
        }

        int nextInt() {
            return Integer.parseInt(next());
        }

        long nextLong() {
            return Long.parseLong(next());
        }

        double nextDouble() {
            return Double.parseDouble(next());
        }

        String nextLine(){
            String str = "";
            try {
                str = br.readLine();
            } catch (IOException e) {
                e.printStackTrace();
            }
            return str;
        }
    }
    //--------------------------------------------------------
}
/**
 * Utility class with factory methods to create different types of MQTT messages.
 */
public final class MqttMessageFactory {

    /**
     * Creates the concrete {@link MqttMessage} subclass selected by the fixed
     * header's message type, casting the supplied variable header and payload
     * to the types that message expects.
     *
     * @param mqttFixedHeader fixed header; its {@code messageType()} picks the subclass
     * @param variableHeader  variable-header object matching the message type
     *                        (ignored for types that carry none, e.g. PINGREQ)
     * @param payload         payload object matching the message type (may be null)
     * @return a concrete message instance
     * @throws IllegalArgumentException if the message type is unknown
     * @throws ClassCastException       if variableHeader/payload do not match the type
     */
    public static MqttMessage newMessage(MqttFixedHeader mqttFixedHeader, Object variableHeader, Object payload) {
        switch (mqttFixedHeader.messageType()) {
            // Messages with both a typed variable header and a typed payload.
            case CONNECT:
                return new MqttConnectMessage(
                        mqttFixedHeader,
                        (MqttConnectVariableHeader) variableHeader,
                        (MqttConnectPayload) payload);

            case CONNACK:
                return new MqttConnAckMessage(mqttFixedHeader, (MqttConnAckVariableHeader) variableHeader);

            case SUBSCRIBE:
                return new MqttSubscribeMessage(
                        mqttFixedHeader,
                        (MqttMessageIdVariableHeader) variableHeader,
                        (MqttSubscribePayload) payload);

            case SUBACK:
                return new MqttSubAckMessage(
                        mqttFixedHeader,
                        (MqttMessageIdVariableHeader) variableHeader,
                        (MqttSubAckPayload) payload);

            case UNSUBACK:
                return new MqttUnsubAckMessage(
                        mqttFixedHeader,
                        (MqttMessageIdVariableHeader) variableHeader);

            case UNSUBSCRIBE:
                return new MqttUnsubscribeMessage(
                        mqttFixedHeader,
                        (MqttMessageIdVariableHeader) variableHeader,
                        (MqttUnsubscribePayload) payload);

            // PUBLISH carries its payload as a raw ByteBuf rather than a typed object.
            case PUBLISH:
                return new MqttPublishMessage(
                        mqttFixedHeader,
                        (MqttPublishVariableHeader) variableHeader,
                        (ByteBuf) payload);

            case PUBACK:
                return new MqttPubAckMessage(mqttFixedHeader, (MqttMessageIdVariableHeader) variableHeader);

            // QoS-2 flow acknowledgements: untyped variable header, no payload.
            case PUBREC:
            case PUBREL:
            case PUBCOMP:
                return new MqttMessage(mqttFixedHeader, variableHeader);

            // Header-only messages: variableHeader/payload arguments are ignored.
            case PINGREQ:
            case PINGRESP:
            case DISCONNECT:
                return new MqttMessage(mqttFixedHeader);

            default:
                throw new IllegalArgumentException("unknown message type: " + mqttFixedHeader.messageType());
        }
    }

    /**
     * Creates a placeholder message recording a decode failure, with no headers.
     *
     * @param cause the decoding error
     * @return a message whose decoder result is {@code DecoderResult.failure(cause)}
     */
    public static MqttMessage newInvalidMessage(Throwable cause) {
        return new MqttMessage(null, null, null, DecoderResult.failure(cause));
    }

    /**
     * Creates a placeholder message recording a decode failure, preserving
     * whatever headers were successfully decoded before the error.
     *
     * @param mqttFixedHeader fixed header decoded so far (may be null)
     * @param variableHeader  variable header decoded so far (may be null)
     * @param cause           the decoding error
     * @return a message whose decoder result is {@code DecoderResult.failure(cause)}
     */
    public static MqttMessage newInvalidMessage(MqttFixedHeader mqttFixedHeader, Object variableHeader,
                                                Throwable cause) {
        return new MqttMessage(mqttFixedHeader, variableHeader, null, DecoderResult.failure(cause));
    }

    // Static-only utility class; not instantiable.
    private MqttMessageFactory() { }
}
def _similarity(self, anchor: tuple, num_samples: int) -> Tuple[np.ndarray, np.ndarray]: return self.perturb_sentence( anchor, num_samples, **self.perturb_opts, )
/**
 * Cucumber reporter for ReportPortal that reports scenarios as test methods.
 * <p>
 * Mapping between Cucumber and ReportPortal is as follows:
 * <ul>
 * <li>feature - TEST</li>
 * <li>scenario - STEP</li>
 * <li>step - log item</li>
 * </ul>
 * <p>
 * Dummy "Root Test Suite" is created because in current implementation of RP
 * test items cannot be immediate children of a launch
 * <p>
 * Background steps and hooks are reported as part of corresponding scenarios.
 * Outline example rows are reported as individual scenarios with [ROW NUMBER]
 * after the name.
 *
 * @author Sergey_Gvozdyukevich
 * @author Serhii Zharskyi
 */
public class ScenarioReporter extends AbstractReporter {
    // Visual marker wrapped around step/result log lines (see decorateMessage).
    private static final String SEPARATOR = "-------------------------";

    // Lazily-started root suite item id; null after the launch finishes.
    protected Supplier<Maybe<String>> rootSuiteId;

    @Override
    protected void beforeLaunch() {
        super.beforeLaunch();
        startRootItem();
    }

    @Override
    protected void beforeStep(TestStep testStep) {
        Step step = currentScenarioContext.getStep(testStep);
        // Steps are logged (not started as items) under the scenario, per the
        // class-level mapping: step -> log item.
        String decoratedStepName = decorateMessage(Utils.buildNodeName(currentScenarioContext.getStepPrefix(), step.getKeyword(), testStep.getStepText(), " "));
        String multilineArg = Utils.buildMultilineArgument(testStep);
        Utils.sendLog(decoratedStepName + multilineArg, "INFO", null);
    }

    @Override
    protected void afterStep(Result result) {
        reportResult(result, decorateMessage("STEP " + result.getStatus().toString().toUpperCase()));
    }

    @Override
    protected void beforeHooks(Boolean isBefore) {
        // noop
    }

    @Override
    protected void afterHooks(Boolean isBefore) {
        // noop
    }

    @Override
    protected void hookFinished(TestStep step, Result result, Boolean isBefore) {
        // Hooks are logged as part of the scenario, labelled with their kind
        // and code location.
        reportResult(result, (isBefore ? "@Before" : "@After") + "\n" + step.getCodeLocation());
    }

    @Override
    protected String getFeatureTestItemType() {
        return "TEST";
    }

    @Override
    protected String getScenarioTestItemType() {
        return "STEP";
    }

    @Override
    protected Maybe<String> getRootItemId() {
        return rootSuiteId.get();
    }

    @Override
    protected void afterLaunch() {
        finishRootItem();
        super.afterLaunch();
    }

    /**
     * Finish root suite and release its id.
     */
    protected void finishRootItem() {
        Utils.finishTestItem(RP.get(), rootSuiteId.get());
        rootSuiteId = null;
    }

    /**
     * Start root suite. Memoized so the item is created at most once, on
     * first access of {@link #getRootItemId()}.
     */
    protected void startRootItem() {
        rootSuiteId = Suppliers.memoize(new Supplier<Maybe<String>>() {
            @Override
            public Maybe<String> get() {
                StartTestItemRQ rq = new StartTestItemRQ();
                rq.setName("Root User Story");
                rq.setStartTime(Calendar.getInstance().getTime());
                rq.setType("STORY");
                return RP.get().startTestItem(rq);
            }
        });
    }

    /**
     * Add separators to log item to distinguish from real log messages
     *
     * @param message to decorate
     * @return decorated message
     */
    private String decorateMessage(String message) {
        return ScenarioReporter.SEPARATOR + message + ScenarioReporter.SEPARATOR;
    }
}
<reponame>luisvt-angular/ngx-dt-table<filename>projects/demo/src/app/pages/column-filter/column-filter.component.ts import { Component, OnInit } from '@angular/core'; import { DtTableComponent } from 'ngx-dt-table'; import { MatButtonToggleChange } from '@angular/material/button-toggle'; @Component({ selector: 'app-column-filter', templateUrl: './column-filter.component.html', styleUrls: ['./column-filter.component.scss'] }) export class ColumnFilterComponent implements OnInit { players = [ {id: 1, age: 27, club: 'Barcelona', name: '<NAME>', national: 'Argentina'}, {id: 2, age: 29, club: 'Real Madrid', name: '<NAME>', national: 'Portugal'}, {id: 3, age: 34, club: 'Liverpool', name: '<NAME>', national: 'England'}, {id: 4, age: 22, club: 'Barcelona', name: 'Neymar', national: 'Brazil'}, {id: 5, age: 25, club: 'Borussia Dortmund', name: '<NAME>', national: 'Germany'}, {id: 6, age: 26, club: 'Real Madrid', name: '<NAME>', national: 'France'}, {id: 7, age: 30, club: 'Manchester United', name: '<NAME>', national: 'Holland'}, {id: 8, age: 28, club: 'Manchester City', name: '<NAME>', national: 'Spain'}, {id: 9, age: 35, club: 'Juventus', name: '<NAME>', national: 'Italy'}, ]; filteringAction = 'Start'; constructor() { } ngOnInit(): void { } toggleFiltering(event: MatButtonToggleChange, playersTable: DtTableComponent): void { if (event.source.checked) { playersTable.startFiltering(); this.filteringAction = 'End'; } else { playersTable.endFiltering(); this.filteringAction = 'Start'; } } }
<reponame>pkdcryptos/https-github.com-alpaca-finance-alpaca-contract-1 import { ethers, upgrades, waffle } from "hardhat"; import { Signer, BigNumberish, utils, Wallet } from "ethers"; import chai from "chai"; import { solidity } from "ethereum-waffle"; import "@openzeppelin/test-helpers"; import { MockERC20, MockERC20__factory, StrategyAddBaseTokenOnly, StrategyAddBaseTokenOnly__factory, UniswapV2Factory, UniswapV2Factory__factory, UniswapV2Pair, UniswapV2Pair__factory, UniswapV2Router02, UniswapV2Router02__factory, WETH, WETH__factory } from "../typechain"; chai.use(solidity); const { expect } = chai; describe('Pancakeswap - StrategyAddBaseTokenOnly', () => { const FOREVER = '2000000000'; /// Uniswap-related instance(s) let factory: UniswapV2Factory; let router: UniswapV2Router02; let lp: UniswapV2Pair; /// Token-related instance(s) let weth: WETH; let baseToken: MockERC20; let quoteToken: MockERC20; /// Strategy-ralted instance(s) let strat: StrategyAddBaseTokenOnly; // Accounts let deployer: Signer; let alice: Signer; let bob: Signer; // Contract Signer let baseTokenAsAlice: MockERC20; let baseTokenAsBob: MockERC20; let lpAsAlice: UniswapV2Pair; let lpAsBob: UniswapV2Pair; let quoteTokenAsAlice: MockERC20; let quoteTokenAsBob: MockERC20; let routerAsAlice: UniswapV2Router02; let routerAsBob: UniswapV2Router02; let stratAsAlice: StrategyAddBaseTokenOnly; let stratAsBob: StrategyAddBaseTokenOnly; beforeEach(async () => { [deployer, alice, bob] = await ethers.getSigners(); // Setup Uniswap const UniswapV2Factory = (await ethers.getContractFactory( "UniswapV2Factory", deployer )) as UniswapV2Factory__factory; factory = await UniswapV2Factory.deploy((await deployer.getAddress())); await factory.deployed(); const WETH = (await ethers.getContractFactory( "WETH", deployer )) as WETH__factory; weth = await WETH.deploy(); await factory.deployed(); const UniswapV2Router02 = (await ethers.getContractFactory( "UniswapV2Router02", deployer )) as UniswapV2Router02__factory; 
router = await UniswapV2Router02.deploy(factory.address, weth.address); await router.deployed(); /// Setup token stuffs const MockERC20 = (await ethers.getContractFactory( "MockERC20", deployer )) as MockERC20__factory baseToken = await upgrades.deployProxy(MockERC20, ['BTOKEN', 'BTOKEN']) as MockERC20; await baseToken.deployed(); await baseToken.mint(await alice.getAddress(), ethers.utils.parseEther('100')); await baseToken.mint(await bob.getAddress(), ethers.utils.parseEther('100')); quoteToken = await upgrades.deployProxy(MockERC20, ['FTOKEN', 'FTOKEN']) as MockERC20; await quoteToken.deployed(); await quoteToken.mint(await alice.getAddress(), ethers.utils.parseEther('10')); await quoteToken.mint(await bob.getAddress(), ethers.utils.parseEther('10')); await factory.createPair(baseToken.address, quoteToken.address); lp = await UniswapV2Pair__factory.connect(await factory.getPair(quoteToken.address, baseToken.address), deployer); const StrategyAddBaseTokenOnly = (await ethers.getContractFactory( "StrategyAddBaseTokenOnly", deployer )) as StrategyAddBaseTokenOnly__factory; strat = await upgrades.deployProxy(StrategyAddBaseTokenOnly, [router.address]) as StrategyAddBaseTokenOnly; await strat.deployed(); // Assign contract signer baseTokenAsAlice = MockERC20__factory.connect(baseToken.address, alice); baseTokenAsBob = MockERC20__factory.connect(baseToken.address, bob); quoteTokenAsAlice = MockERC20__factory.connect(quoteToken.address, alice); quoteTokenAsBob = MockERC20__factory.connect(quoteToken.address, bob); routerAsAlice = UniswapV2Router02__factory.connect(router.address, alice); routerAsBob = UniswapV2Router02__factory.connect(router.address, bob); lpAsAlice = UniswapV2Pair__factory.connect(lp.address, alice); lpAsBob = UniswapV2Pair__factory.connect(lp.address, bob); stratAsAlice = StrategyAddBaseTokenOnly__factory.connect(strat.address, alice); stratAsBob = StrategyAddBaseTokenOnly__factory.connect(strat.address, bob); }); it('should revert on bad calldata', 
async () => { // Bob passes some bad calldata that can't be decoded await expect( stratAsBob.execute(await bob.getAddress(), '0', '0x1234') ).to.be.reverted; }); it('should convert all BTOKEN to LP tokens at best rate', async () => { // Alice adds 0.1 FTOKEN + 1 WBTC await quoteTokenAsAlice.approve(router.address, ethers.utils.parseEther('0.1')); await baseTokenAsAlice.approve(router.address, ethers.utils.parseEther('1')); // Add liquidity to the WBTC-FTOKEN pool on Uniswap await routerAsAlice.addLiquidity( baseToken.address, quoteToken.address, ethers.utils.parseEther('1'), ethers.utils.parseEther('0.1'), '0', '0', await alice.getAddress(), FOREVER); // Bob transfer 0.1 WBTC to StrategyAddBaseTokenOnly first await baseTokenAsBob.transfer(strat.address, ethers.utils.parseEther('0.1')); // Bob uses AddBaseTokenOnly strategy to add 0.1 WBTC await stratAsBob.execute( await bob.getAddress(), '0', ethers.utils.defaultAbiCoder.encode( ['address','address', 'uint256'], [baseToken.address, quoteToken.address, '0'] ) ); expect(await lp.balanceOf(await bob.getAddress())).to.be.bignumber.eq(ethers.utils.parseEther('0.015403813051483008')) expect(await lp.balanceOf(strat.address)).to.be.bignumber.eq(ethers.utils.parseEther('0')) expect(await quoteToken.balanceOf(strat.address)).to.be.bignumber.eq(ethers.utils.parseEther('0')) // Bob uses AddBaseTokenOnly strategy to add another 0.1 WBTC await baseTokenAsBob.transfer(strat.address, ethers.utils.parseEther('0.1')); await lpAsBob.transfer(strat.address, ethers.utils.parseEther('0.015403813051483008')); await stratAsBob.execute( await bob.getAddress(), '0', ethers.utils.defaultAbiCoder.encode( ['address', 'address', 'uint256'], [baseToken.address, quoteToken.address, ethers.utils.parseEther('0.01')] ) ); expect(await lp.balanceOf(await bob.getAddress())).to.be.bignumber.eq(ethers.utils.parseEther('0.030128652046542732')) expect(await lp.balanceOf(strat.address)).to.be.bignumber.eq(ethers.utils.parseEther('0')) expect(await 
quoteToken.balanceOf(strat.address)).to.be.bignumber.eq(ethers.utils.parseEther('0')) // Bob uses AddBaseTokenOnly strategy yet again, but now with an unreasonable min LP request await baseTokenAsBob.transfer(strat.address, ethers.utils.parseEther('0.1')) await expect( stratAsBob.execute( await bob.getAddress(), '0', ethers.utils.defaultAbiCoder.encode( ['address', 'address', 'uint256'], [baseToken.address, quoteToken.address, ethers.utils.parseEther('0.05')] ), ) ).to.be.revertedWith('insufficient LP tokens received'); }); });
/** * Copyright 2011-2016 Asakusa Framework Team. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.asakusafw.bulkloader.recoverer; import java.sql.Connection; import java.text.MessageFormat; import java.util.Date; import java.util.List; import com.asakusafw.bulkloader.bean.ExportTargetTableBean; import com.asakusafw.bulkloader.bean.ExportTempTableBean; import com.asakusafw.bulkloader.bean.ExporterBean; import com.asakusafw.bulkloader.common.BulkLoaderInitializer; import com.asakusafw.bulkloader.common.ConfigurationLoader; import com.asakusafw.bulkloader.common.Constants; import com.asakusafw.bulkloader.common.DBAccessUtil; import com.asakusafw.bulkloader.common.DBConnection; import com.asakusafw.bulkloader.common.ExportTempTableStatus; import com.asakusafw.bulkloader.common.JobFlowParamLoader; import com.asakusafw.bulkloader.exception.BulkLoaderSystemException; import com.asakusafw.bulkloader.exporter.ExportDataCopy; import com.asakusafw.bulkloader.exporter.LockRelease; import com.asakusafw.bulkloader.log.Log; import com.asakusafw.runtime.core.context.RuntimeContext; /** * Recovererの実行クラス。 * * @author yuta.shirai */ public class Recoverer { static final Log LOG = new Log(Recoverer.class); /** * Exporterで読み込むプロパティファイル。 */ private static final List<String> PROPERTIES = Constants.PROPERTIES_DB; /** * 起動方法 ジョブフロー実行ID指定有無。 */ private boolean hasExecutionId = false; /** * 処理結果 Revovery対象のジョブフローインスタンスが存在したか。 */ private boolean isExistJobFlowInstance = false; /** * 処理結果 
ロールバックしたジョブフローインスタンスが存在したか。 */ private boolean isExistRollBack = false; /** * 処理結果 他プロセスで処理中のジョブフローインスタンスが存在したか。 */ private boolean isExistExecOthProcess = false; /** * 処理結果 リカバリに失敗したジョブフローインスタンスが存在したか。 */ private boolean isExistRecoveryFail = false; /** * プログラムエントリ。 * コマンドライン引数として以下の値をとる。 <pre> ・args[0]=ターゲット名 ・args[1]=ジョブフロー実行ID </pre> * @param args コマンドライン引数 */ public static void main(String[] args) { RuntimeContext.set(RuntimeContext.DEFAULT.apply(System.getenv())); Recoverer recoverer = new Recoverer(); int result = recoverer.execute(args); System.exit(result); } /** * Recovererの処理を実行する。 * 処理結果として以下のコードを返す。 * ・0:全てのジョブフローインスタンスに対するリカバリ処理がロールフォワードであり処理に成功した場合 * または、処理対象のジョブフローインスタンスが存在しなかった場合。 * ・1:処理が異常終了した場合 * ・2:一部または全ての処理対象ジョブフローインスタンスに対する処理がロールバックであり処理に成功した場合 * * @param args コマンドライン引数 * @return 終了コード * @see Constants#EXIT_CODE_SUCCESS * @see Constants#EXIT_CODE_WARNING * @see Constants#EXIT_CODE_ERROR */ protected int execute(String[] args) { if (args.length > 2) { System.err.println("Recovererに指定する引数の数が不正です。 引数の数:" + args.length); return Constants.EXIT_CODE_ERROR; } String targetName = args[0]; String executionId; if (args.length == 2) { executionId = args[1]; hasExecutionId = true; } else { executionId = null; hasExecutionId = false; } try { // 初期処理 if (!BulkLoaderInitializer.initDBServer("Recoverer", executionId, PROPERTIES, targetName)) { LOG.error("TG-RECOVERER-01003", new Date(), targetName, executionId); return Constants.EXIT_CODE_ERROR; } // 開始ログ出力 LOG.info("TG-RECOVERER-01001", new Date(), targetName, executionId); if (RuntimeContext.get().isSimulation()) { // check only DB connection DBConnection.getConnection().close(); return Constants.EXIT_CODE_SUCCESS; } // ジョブフロー実行テーブルの内容を取得する List<ExporterBean> beans; try { beans = selectRunningJobFlow(executionId); if (beans.size() > 0) { isExistJobFlowInstance = true; } } catch (BulkLoaderSystemException e) { LOG.log(e); LOG.error("TG-RECOVERER-01006", new Date(), targetName, executionId); return 
Constants.EXIT_CODE_ERROR; } // ジョブフロー実行テーブルのレコード毎にリカバリ処理を行う if (isExistJobFlowInstance) { assert beans.size() >= 1; for (ExporterBean bean : beans) { LOG.info("TG-RECOVERER-01016", targetName, bean.getBatchId(), bean.getJobflowId(), bean.getJobflowSid(), bean.getExecutionId()); try { recovery(bean); } catch (BulkLoaderSystemException e) { LOG.log(e); isExistRecoveryFail = true; } } } // 正常終了 return judgeExitCode(targetName, executionId); } catch (Exception e) { try { LOG.error(e, "TG-RECOVERER-01004", new Date(), targetName, executionId); return Constants.EXIT_CODE_ERROR; } catch (Exception e1) { System.err.print("Recovererで不明なエラーが発生しました。"); e1.printStackTrace(); return Constants.EXIT_CODE_ERROR; } } } /** * 処理結果に応じて戻り値を判定する。 * @param targetName ターゲット名 * @param executionId ジョブフロー実行ID * @return 戻り値 */ private int judgeExitCode(String targetName, String executionId) { if (hasExecutionId) { // ジョブフロー実行ID指定有りの場合 if (!isExistJobFlowInstance) { LOG.info("TG-RECOVERER-01002", "正常終了(指定されたジョブフローインスタンスが存在しない)", new Date(), targetName, executionId); return Constants.EXIT_CODE_SUCCESS; } else if (isExistRecoveryFail) { LOG.error("TG-RECOVERER-02001", "異常終了(指定されたジョブフローインスタンスのリカバリ処理に失敗した)", new Date(), targetName, executionId); return Constants.EXIT_CODE_ERROR; } else if (isExistExecOthProcess) { LOG.error("TG-RECOVERER-02001", "異常終了(指定されたジョブフローインスタンスが処理中である)", new Date(), targetName, executionId); return Constants.EXIT_CODE_ERROR; } else if (isExistRollBack) { LOG.info("TG-RECOVERER-01002", "正常終了(指定されたジョブフローインスタンスのロールバックを行った)", new Date(), targetName, executionId); return Constants.EXIT_CODE_WARNING; } else { LOG.info("TG-RECOVERER-01002", "正常終了(指定されたジョブフローインスタンスのロールフォワードを行った)", new Date(), targetName, executionId); return Constants.EXIT_CODE_SUCCESS; } } else { // ジョブフロー実行ID指定無しの場合 if (!isExistJobFlowInstance) { LOG.info("TG-RECOVERER-01002", "正常終了(リカバリ対象のジョブフローインスタンスが存在しない)", new Date(), targetName, executionId); return Constants.EXIT_CODE_SUCCESS; } else if 
(isExistRecoveryFail) { LOG.error("TG-RECOVERER-02001", "異常終了(リカバリ処理に失敗したジョブフローインスタンスが存在する)", new Date(), targetName, executionId); return Constants.EXIT_CODE_ERROR; } else if (isExistExecOthProcess) { LOG.error("TG-RECOVERER-02001", "異常終了(処理中のジョブフローインスタンスが存在する)", new Date(), targetName, executionId); return Constants.EXIT_CODE_ERROR; } else if (isExistRollBack) { LOG.info("TG-RECOVERER-01002", "正常終了(ロールバックを行ったジョブフローインスタンスが存在する)", new Date(), targetName, executionId); return Constants.EXIT_CODE_WARNING; } else { LOG.info("TG-RECOVERER-01002", "正常終了(全てのジョブフローインスタンスをロールフォワードした)", new Date(), targetName, executionId); return Constants.EXIT_CODE_SUCCESS; } } } // CHECKSTYLE:OFF MethodLengthCheck - FIXME refactoring /** * リカバリ処理を行う。 * 処理結果として以下の通り戻り値を返す。 <pre> ・ロールバックを行った場合:true ・ロールフォワードを行った場合:false ・当該ジョブフローがリカバリ対象でなかった場合:false </pre> * @param exporterBean Exporterで使用するパラメータを保持するオブジェクト * @throws BulkLoaderSystemException リカバリ失敗 */ private void recovery(ExporterBean exporterBean) throws BulkLoaderSystemException { String executionId = exporterBean.getExecutionId(); Connection lockConn = null; try { // ジョブフローインスタンスIDの排他制御 LOG.info("TG-RECOVERER-01017", exporterBean.getTargetName(), exporterBean.getBatchId(), exporterBean.getJobflowId(), exporterBean.getJobflowSid(), exporterBean.getExecutionId()); lockConn = DBConnection.getConnection(); if (!DBAccessUtil.getJobflowInstanceLock(executionId, lockConn)) { // 他のプロセスが排他制御を行っている為、リカバリ対象外とする。 LOG.info("TG-RECOVERER-01008", exporterBean.getTargetName(), exporterBean.getBatchId(), exporterBean.getJobflowId(), exporterBean.getJobflowSid(), exporterBean.getExecutionId()); isExistExecOthProcess = true; return; } else { LOG.info("TG-RECOVERER-01018", exporterBean.getTargetName(), exporterBean.getBatchId(), exporterBean.getJobflowId(), exporterBean.getJobflowSid(), exporterBean.getExecutionId()); } // 当該ジョブフローインスタンスがリカバリ対象か判断する if (!isExecRecovery(exporterBean, hasExecutionId)) { return; } // 当該ジョブフローの設定を読み込む 
loadParam(exporterBean); // ロールバック可能か判断する boolean rollBack = judgeRollBack(exporterBean); if (rollBack) { LOG.info("TG-RECOVERER-01023", exporterBean.getTargetName(), exporterBean.getBatchId(), exporterBean.getJobflowId(), exporterBean.getJobflowSid(), exporterBean.getExecutionId(), "ロールバック"); } else { LOG.info("TG-RECOVERER-01023", exporterBean.getTargetName(), exporterBean.getBatchId(), exporterBean.getJobflowId(), exporterBean.getJobflowSid(), exporterBean.getExecutionId(), "ロールフォワード"); } // ロールバックできない場合、Exportデータのコピーを行う boolean updateEnd = true; if (!rollBack) { // Exportデータコピー処理を実行する LOG.info("TG-RECOVERER-01019", exporterBean.getTargetName(), exporterBean.getBatchId(), exporterBean.getJobflowId(), exporterBean.getJobflowSid(), exporterBean.getExecutionId()); ExportDataCopy copy = createExportDataCopy(); if (!copy.copyData(exporterBean)) { throw new BulkLoaderSystemException(getClass(), "TG-RECOVERER-01012", exporterBean.getTargetName(), exporterBean.getBatchId(), exporterBean.getJobflowId(), exporterBean.getJobflowSid(), exporterBean.getExecutionId()); } else { // 更新レコードのコピーが全て終了しているかを表すフラグを取得 updateEnd = copy.isUpdateEnd(); LOG.info("TG-RECOVERER-01020", exporterBean.getTargetName(), exporterBean.getBatchId(), exporterBean.getJobflowId(), exporterBean.getJobflowSid(), exporterBean.getExecutionId()); } } // エクスポートテンポラリテーブルの削除及びロック解除を行う LOG.info("TG-RECOVERER-01021", exporterBean.getTargetName(), exporterBean.getBatchId(), exporterBean.getJobflowId(), exporterBean.getJobflowSid(), exporterBean.getExecutionId()); LockRelease lock = createLockRelease(); if (!lock.releaseLock(exporterBean, updateEnd)) { // ロックの解除に失敗 throw new BulkLoaderSystemException(getClass(), "TG-RECOVERER-01013", exporterBean.getTargetName(), exporterBean.getBatchId(), exporterBean.getJobflowId(), exporterBean.getJobflowSid(), exporterBean.getExecutionId()); } else { LOG.info("TG-RECOVERER-01022", exporterBean.getTargetName(), exporterBean.getBatchId(), exporterBean.getJobflowId(), 
exporterBean.getJobflowSid(), exporterBean.getExecutionId()); } // 処理結果をログに出力 if (rollBack) { LOG.info("TG-RECOVERER-01014", exporterBean.getTargetName(), exporterBean.getBatchId(), exporterBean.getJobflowId(), exporterBean.getJobflowSid(), exporterBean.getExecutionId(), "ロールバック"); } else { if (updateEnd) { LOG.info("TG-RECOVERER-01014", exporterBean.getTargetName(), exporterBean.getBatchId(), exporterBean.getJobflowId(), exporterBean.getJobflowSid(), exporterBean.getExecutionId(), "ロールフォワード"); } else { throw new BulkLoaderSystemException(getClass(), "TG-RECOVERER-01015", exporterBean.getTargetName(), exporterBean.getBatchId(), exporterBean.getJobflowId(), exporterBean.getJobflowSid(), exporterBean.getExecutionId(), "ロールフォワード"); } } if (rollBack) { isExistRollBack = true; } } finally { // ジョブフローインスタンスIDの排他を解除 DBAccessUtil.releaseJobflowInstanceLock(lockConn); } } // CHECKSTYLE:ON MethodLengthCheck /** * 当該ジョブフローインスタンスがリカバリ対象かどうかを判断する。 * @param exporterBean Exporterで使用するパラメータを保持するオブジェクト * @param hasParam 引数にジョブフローインスタンスIDが指定されているか * @return リカバリ対象の場合:true、リカバリ対象外の場合:false * @throws BulkLoaderSystemException SQL例外が発生した場合 */ protected boolean isExecRecovery( ExporterBean exporterBean, boolean hasParam) throws BulkLoaderSystemException { String executionId = exporterBean.getExecutionId(); // ジョブフロー実行テーブルにレコードが存在しない場合はエラーとする List<ExporterBean> beans = selectRunningJobFlow(executionId); if (beans.size() == 0) { throw new BulkLoaderSystemException(getClass(), "TG-RECOVERER-01009", exporterBean.getTargetName(), exporterBean.getBatchId(), exporterBean.getJobflowId(), exporterBean.getJobflowSid(), exporterBean.getExecutionId()); } // ジョブフローインスタンスIDが指定されていない場合は、 // 当該ジョブフローインスタンスが実行中かMMに問い合わせる if (!hasParam) { if (isRunningJobFlow(executionId)) { // 実行中のため、リカバリ対象外とする。 LOG.info("TG-RECOVERER-01010", exporterBean.getTargetName(), exporterBean.getBatchId(), exporterBean.getJobflowId(), exporterBean.getJobflowSid(), exporterBean.getExecutionId()); return false; } } return true; } 
/** * 指定されたジョブフローの設定を読み込んでBeanに設定する。 * @param exporterBean テンポラリ管理テーブルの内容を保持するBean * @throws BulkLoaderSystemException テンポラリ管理テーブルの内容を保持するBean */ protected void loadParam(ExporterBean exporterBean) throws BulkLoaderSystemException { // DSLプロパティを読み込み JobFlowParamLoader paramLoader = createJobFlowParamLoader(); if (!paramLoader.loadRecoveryParam( exporterBean.getTargetName(), exporterBean.getBatchId(), exporterBean.getJobflowId())) { throw new BulkLoaderSystemException(getClass(), "TG-RECOVERER-01007", "DSLプロパティ", MessageFormat.format( "ターゲット名:{0}, バッチID:{1}, ジョブフローID:{2}" , exporterBean.getTargetName(), exporterBean.getBatchId(), exporterBean.getJobflowId()), exporterBean.getExecutionId()); } exporterBean.setExportTargetTable(paramLoader.getExportTargetTables()); exporterBean.setImportTargetTable(paramLoader.getImportTargetTables()); // リトライ回数・リトライインターバルを読み込み String count = ConfigurationLoader.getProperty(Constants.PROP_KEY_EXP_RETRY_COUNT); String interval = ConfigurationLoader.getProperty(Constants.PROP_KEY_EXP_RETRY_INTERVAL); try { exporterBean.setRetryCount(Integer.parseInt(count)); exporterBean.setRetryInterval(Integer.parseInt(interval)); } catch (NumberFormatException e) { throw new BulkLoaderSystemException(getClass(), "TG-RECOVERER-01007", "リトライ回数,リトライインターバル", count + "," + interval, exporterBean.getExecutionId()); } } /** * 当該ジョブフローがロールバック可能か判断する。 * @param exporterBean Exporterで使用するパラメータを保持するオブジェクト * @return ロールバック可能な場合:true、ロールバック不可の場合:false * @throws BulkLoaderSystemException テンポラリ管理テーブルの検索に失敗した場合 */ protected boolean judgeRollBack(ExporterBean exporterBean) throws BulkLoaderSystemException { List<ExportTempTableBean> tempBean = null; try { tempBean = getExportTempTable(exporterBean.getJobflowSid()); } catch (BulkLoaderSystemException e) { LOG.log(e); throw new BulkLoaderSystemException(getClass(), "TG-RECOVERER-01011", exporterBean.getTargetName(), exporterBean.getBatchId(), exporterBean.getJobflowId(), exporterBean.getJobflowSid(), 
exporterBean.getExecutionId()); } // テンポラリ管理テーブルのレコードが存在しない場合、ロールバック可能と判断する if (tempBean == null || tempBean.size() == 0) { return true; } boolean result = false; for (ExportTempTableBean tempTable : tempBean) { // Exportテンポラリテーブル名をセット ExportTargetTableBean tableBean = exporterBean.getExportTargetTable(tempTable.getExportTableName()); if (tableBean != null) { tableBean.setExportTempTableName(tempTable.getTemporaryTableName()); tableBean.setDuplicateFlagTableName(tempTable.getDuplicateFlagTableName()); } // ExportファイルをエクスポートテンポラリテーブルにLoad中のため、ロールバック可能と判断する if (tempTable.getTempTableStatus() == null || tempTable.getTempTableStatus().equals(ExportTempTableStatus.LOAD_EXIT)) { result = true; } } return result; } /** * ジョブフローインスタンスが実行中か問い合わせる。 * @param executionId ジョブフロー実行ID * @return 実行中の場合:true、実行中でない場合:false */ protected boolean isRunningJobFlow(String executionId) { // TODO 未実装。 return false; } /** * エクスポートテンポラリ管理テーブルの情報を取得して返す。 * @param jobflowSid ジョブフローSID * @return エクスポートテンポラリ管理テーブルの情報 * @throws BulkLoaderSystemException SQL例外が発生した場合 */ protected List<ExportTempTableBean> getExportTempTable(String jobflowSid) throws BulkLoaderSystemException { return DBAccessUtil.getExportTempTable(jobflowSid); } /** * ジョブフロー実行テーブルの内容を取得する。 * @param executionId 実行ID * @return ジョブフロー実行テーブルの内容, never {@code null} * @throws BulkLoaderSystemException SQL例外が発生した場合 */ protected List<ExporterBean> selectRunningJobFlow(String executionId) throws BulkLoaderSystemException { List<ExporterBean> beans = DBAccessUtil.selectRunningJobFlow(executionId); return beans; } /** * DSLParamLoaderを生成して返す。 * @return JobFlowParamLoader */ protected JobFlowParamLoader createJobFlowParamLoader() { return new JobFlowParamLoader(); } /** * ExportDataCopyを生成して返す。 * @return ExportDataCopy */ protected ExportDataCopy createExportDataCopy() { return new ExportDataCopy(); } /** * LockReleaseを生成して返す。 * @return LockRelease */ protected LockRelease createLockRelease() { return new LockRelease(); } }
/**
 * An enumeration that represents action times
 * for an LDAP trigger specification. Only the {@code AFTER}
 * action time is currently supported.
 *
 * @author <a href="mailto:[email protected]">Apache Directory Project</a>
 */
public final class ActionTime
{
    //public static final ActionTime BEFORE = new ActionTime( "BEFORE" );

    /** The AFTER instance. */
    public static final ActionTime AFTER = new ActionTime( "AFTER" );

    //public static final ActionTime INSTEADOF = new ActionTime( "INSTEADOF" );

    /** The human-readable name of this action time. */
    private final String name;


    /**
     * Creates a new action time with the given name.
     *
     * @param name the name of this action time
     */
    private ActionTime( String name )
    {
        this.name = name;
    }


    /**
     * Returns the name of this action time.
     *
     * @return the name of this action time.
     */
    public String getName()
    {
        return name;
    }


    /**
     * {@inheritDoc}
     */
    @Override
    public String toString()
    {
        return name;
    }


    /**
     * {@inheritDoc}
     */
    @Override
    public int hashCode()
    {
        // Same value as the classic seed-and-multiply scheme (37 * 17 + hash).
        return 37 * 17 + ( ( name == null ) ? 0 : name.hashCode() );
    }


    /**
     * {@inheritDoc}
     */
    @Override
    public boolean equals( Object obj )
    {
        if ( obj == this )
        {
            return true;
        }
        if ( ( obj == null ) || ( getClass() != obj.getClass() ) )
        {
            return false;
        }
        ActionTime that = ( ActionTime ) obj;
        if ( name == null )
        {
            return that.name == null;
        }
        return name.equals( that.name );
    }
}
// This will process implicit route information received from another server.
// We will check to see if we have configured or are already connected,
// and if so we will ignore. Otherwise we will attempt to connect.
func (s *Server) processImplicitRoute(info *Info) {
	remoteID := info.ID

	// Hold the server lock for the whole check-and-dispatch sequence so the
	// remotes map cannot change between the lookup and the connect decision.
	s.mu.Lock()
	defer s.mu.Unlock()

	// Ignore information about ourselves.
	if remoteID == s.info.ID {
		return
	}
	// Ignore if we are already connected to this remote.
	if _, exists := s.remotes[remoteID]; exists {
		return
	}
	// Ignore if this route is already in our configuration.
	if s.hasThisRouteConfigured(info) {
		return
	}

	// The route endpoint comes from info.IP and must be a parseable URL.
	r, err := url.Parse(info.IP)
	if err != nil {
		s.Errorf("Error parsing URL from INFO: %v\n", err)
		return
	}
	// Attach cluster credentials when the remote requires authentication.
	opts := s.getOpts()
	if info.AuthRequired {
		r.User = url.UserPassword(opts.Cluster.Username, opts.Cluster.Password)
	}
	// Connect asynchronously; false marks this as an implicit (not
	// configured) route — TODO confirm against connectToRoute's signature.
	s.startGoRoutine(func() { s.connectToRoute(r, false) })
}
def handle_exception(exc_type, exc_value, exc_traceback):
    """Global exception hook: show a critical-error dialog and write a crash log.

    Intended for use as ``sys.excepthook``. Summarizes the last traceback
    frame in a message box, writes the full traceback to a log file under
    %APPDATA%, optionally opens that log in the browser, then exits with
    status 1.
    """
    # Last traceback entry is (filename, line number, function name, text);
    # only the file and line are used for the summary message.
    filename, line, dummy, dummy = traceback.extract_tb(exc_traceback).pop()
    filename = os.path.basename(filename)
    error = "%s: %s" % (exc_type.__name__, exc_value)
    msg = "<html>A critical error has occured.<br/> <b>%s</b><br/><br/>It occurred at <b>line %d</b> of file " \
          "<b>%s</b>.<br/>Press Yes to see the full error report<br/></html>" % (error, line, filename)
    # The static critical() call returns the standard button the user clicked.
    message_box = QMessageBox().critical(None, "Error", msg.strip(),
                                         QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)
    # NOTE(review): os.getenv("APPDATA") is Windows-only and returns None
    # elsewhere, which would make os.path.join raise — confirm this hook is
    # only installed on Windows.
    log_file = os.path.join(os.getenv("APPDATA"), "QATS.launch.pyw.log")
    with open(log_file,"w") as f:
        f.write("".join(traceback.format_exception(exc_type, exc_value, exc_traceback)))
    if message_box == QMessageBox.Yes:
        webbrowser.open(log_file)
    sys.exit(1)
'Mad Max' fans descend on California desert for Wasteland Festival

Think of it as Burning Man for die-hard fans of the cult film 'Mad Max'.

Festival goers attend the first day of Wasteland Weekend in the high desert community of California City in the Mojave Desert, California, where people are gathering for the world's largest post-apocalyptic festival. Photo: FREDERIC J BROWN/AFP/Getty Images

The harsh, desolate landscape of California's Mojave Desert provides the perfect hellscape for Wasteland Weekend. The event, taking place Sept. 22 to 25 this year, is geared to die-hard Mad Max fans who come from all over the continent to re-create the post-apocalyptic world portrayed in the four cult films created by George Miller and Byron Kennedy. Forming tribes, they act as if they're survivors of a civilization collapse, following the plot line of the movie series.

The festival-goers create a baroque, carnivalesque spectacle as they parade across the parched landscape in wild, imaginative costumes (think chest armor, gas masks, lots of studded leather, combat boots) and drive massive rusting junkyard cars. Entertainment includes fire-spinning, burlesque dancing around the bonfire and a football-like game called "jugger ball" played with a dog skull. Think of it as Burning Man for the Mad Max set.

Started in 2010, the event takes place two hours outside of Los Angeles and attracts around 2,000 people. Above is a gallery of images taken on the first day of this year's event, as well as photographs from last year's festival.
from flask import request

from . import bp


@bp.route('/task', methods=['GET', 'POST'])
def task():
    """Collection endpoint: list tasks (GET) or create a task (POST).

    Not implemented yet; both branches are placeholders.
    """
    if request.method == 'GET':
        pass
    elif request.method == 'POST':
        pass


@bp.route('/task/<int:id>', methods=['GET', 'DELETE', 'PATCH'])
def task_id(id):
    """Single-task endpoint: fetch (GET), delete (DELETE) or partially
    update (PATCH) the task identified by ``id``.

    Flask passes the ``<int:id>`` URL segment as a keyword argument, so the
    view function must accept it as a parameter; without it every request to
    this route raises a TypeError.
    """
    if request.method == 'GET':
        pass
    elif request.method == 'DELETE':
        pass
    elif request.method == 'PATCH':
        # Was 'PATH', which never matched any request method.
        pass


@bp.route('/task/all', methods=['GET'])
def task_all():
    """Endpoint for all tasks (GET). Not implemented yet.

    Note: '/task/all' does not collide with '/task/<int:id>' because 'all'
    fails the int converter.
    """
    pass
/**
 * This event is raised when a new project resource is created
 *
 * @author Shane Bryzak
 */
public class NewResourceEvent
{
    /** The newly created resource carried by this event. */
    private final ProjectResource resource;

    /**
     * Creates an event for the given resource.
     *
     * @param resource the newly created project resource
     */
    public NewResourceEvent(ProjectResource resource)
    {
        this.resource = resource;
    }

    /**
     * @return the newly created project resource
     */
    public ProjectResource getResource()
    {
        return resource;
    }
}
We reported last week that David Green (Earth to Echo) would be replacing Jonathan Liebesman as director for Teenage Mutant Ninja Turtles 2, and we can now confirm some of the previous rumours surrounding new character additions.

According to Omelete (via Collider), long-time adversaries Bebop and Rocksteady will appear in Teenage Mutant Ninja Turtles 2, possibly alongside Casey Jones. A few months back we posted some concept artwork for Bebop and Rocksteady and were asked to remove them, prompting speculation that they would feature in the sequel.

“We can say that Bebop and Rocksteady are in the film, and Casey Jones is a character that we love and we’re trying to fit him in as well,” said producer Andrew Form at CCXP. Co-producer Brad Fuller also confirmed the return of April O’Neil, Vernon Fenwick and Shredder. No word yet on Krang.

This will be Bebop and Rocksteady’s first big-screen appearance. They were supposed to be in Teenage Mutant Ninja Turtles 2: The Secret of the Ooze, but this was shot down by creators Kevin Eastman and Peter Laird, and so they were replaced with Tokka and Rahzar.

Directed by Jonathan Liebesman (Wrath of the Titans) and produced by Michael Bay (Transformers: Age of Extinction), Teenage Mutant Ninja Turtles starred Megan Fox (Transformers) as April O’Neil, Pete Ploszek (Parks and Recreation) and Johnny Knoxville (Jackass Presents Bad Grandpa) as Leonardo, Alan Ritchson (The Hunger Games: Catching Fire) as Raphael, Jeremy Howard (How the Grinch Stole Christmas) as Donatello, Noel Fisher (Battle Los Angeles) as Michaelangelo, Danny Woodburn (Seinfeld) and Tony Shalhoub (Monk) as Splinter, William Fichtner (The Dark Knight) as Eric Sachs, Will Arnett (The LEGO Movie) as Vernon Fenwick, Whoopi Goldberg (Ghost) as Bernadette Thompson, and K. Todd Freeman (The Dark Knight) as Baxter Stockman.

You can listen to the Flickering Myth Podcast interview with William Fichtner using the player below:
class IngestionModel:
    """Thin wrapper around the ingestion process APIs.

    Provides direct access to the ingestion stack (job triggering), the
    ingestion job service (job status) and the catalog service (metadata
    lookup), using the endpoints configured in ``config``.
    """

    def __init__(self):
        # Base URLs of the three backing services, taken from configuration.
        self._ingestion_stack_url = config.INGESTION_STACK_URL
        self._ingestion_catalog_url = config.INGESTION_CATALOG_URL
        self._ingestion_job_service_url = config.INGESTION_JOB_SERVICE_URL

    def post_model_ingestion_job(self, request):
        """Start and trigger a new ingestion process for a 3rd-party model.

        :param request: job payload as a dict, sent as the POST body
        :return: the HTTP response from the ingestion stack
        :raises TypeError: if ``request`` is not a dict
        """
        if not isinstance(request, dict):
            _log.error(f'Request should be provided as valid json format:\n{request} => {type(request)}')
            raise TypeError('Request should be provided as valid json format')
        full_model_ingestion_url = common.combine_url(self._ingestion_stack_url, config.INGESTION_3RD_MODEL)
        return br.send_post_request(full_model_ingestion_url, body=request)

    def get_single_job_status(self, job_id):
        """Return the status response of a specific ingestion job.

        Best-effort: a transport failure is logged and ``None`` is returned
        instead of raising.

        :param job_id: job id string expressed as a uuid
        :return: the HTTP response, or ``None`` if the request failed
        :raises TypeError: if ``job_id`` is not a string
        """
        if not isinstance(job_id, str):
            _log.error(f'should be provided job id string expressed as uuid:\n{job_id} => {type(job_id)}')
            raise TypeError('job_id should be provided as a uuid string')
        job_model_ingestion_url = common.combine_url(self._ingestion_job_service_url,
                                                     config.INGESTION_3RD_JOB_STATUS,
                                                     job_id)
        try:
            return br.send_get_request(job_model_ingestion_url)
        except Exception as e:
            _log.error(f'Error on get response from ingestion job progress service with error {str(e)}')
            return None

    def is_model_on_catalog(self, identifier):
        """Check whether a model exists on the catalog.

        :param identifier: string representing the ingested 3rd-party model
        :return: True when the catalog answers with an OK status code
        :raises TypeError: if ``identifier`` is not a string
        """
        resp = self.get_single_3rd_metadata(identifier)
        return resp.status_code == config.ResponseCode.Ok.value

    def get_single_3rd_metadata(self, identifier):
        """Fetch existing metadata for a model from the catalog db.

        :param identifier: string representing the ingested 3rd-party model
        :return: the HTTP response from the catalog service
        :raises TypeError: if ``identifier`` is not a string
        :raises Exception: re-raises any transport error after logging it
        """
        if not isinstance(identifier, str):
            _log.error(f'should be provided identifier expressed as string:\n{identifier} => {type(identifier)}')
            raise TypeError('identifier should be provided as a string')
        model_metadata_on_catalog_url = common.combine_url(self._ingestion_catalog_url,
                                                           config.INGESTION_CATALOG_MODEL_DATA,
                                                           identifier)
        try:
            return br.send_get_request(model_metadata_on_catalog_url)
        except Exception as e:
            _log.error(f'Error on get response from catalog db service with error {str(e)}')
            raise
use crate::error::Error;
use crate::rc_ref::RcRef;
use crate::vm::value::Value;
use crate::vm::value::symbol::{
    SymbolRegistry,
    SymbolRegistryRef,
};
use crate::vm::scope::{
    Scope,
    ScopeRef,
};
use std::rc::{
    Rc,
};
use std::cell::{
    Cell,
    Ref,
    RefMut,
    RefCell,
};

/// Shared, reference-counted handle to an [`Isolate`].
pub type IsolateRef = RcRef<Isolate>;

/// A VM execution context that owns its symbol registry and (once
/// installed via [`Isolate::set_global_scope`]) the global scope.
#[derive(Debug)]
pub struct Isolate {
    // Global scope; `None` until `set_global_scope` is called.
    scope_ref: Option<ScopeRef>, // Global
    symbol_register_ref: SymbolRegistryRef,
}

impl Isolate {
    /// Creates a new isolate with a fresh symbol registry and no global
    /// scope installed yet.
    pub fn new() -> Self {
        let symbol_register_ref = SymbolRegistryRef::new(SymbolRegistry::new());
        Isolate {
            scope_ref: None,
            symbol_register_ref: symbol_register_ref,
        }
    }

    /// Installs the global scope. Must be called before [`Isolate::global`].
    pub fn set_global_scope(&mut self, scope_ref: ScopeRef) {
        self.scope_ref = Some(scope_ref);
    }

    /// Returns the global scope.
    ///
    /// # Panics
    /// Panics when [`Isolate::set_global_scope`] has not been called yet.
    pub fn global(&self) -> &ScopeRef {
        match self.scope_ref {
            None => panic!("Should set global scope first!"),
            Some(ref scope_ref) => scope_ref,
        }
    }

    /// Returns a reference to this isolate's symbol registry.
    pub fn symbol_register_ref(&self) -> &SymbolRegistryRef {
        &self.symbol_register_ref
    }

    /// Runs a script source. Not implemented yet; always panics.
    pub fn run_script(&self, script: &str) -> Result<Value, Error> {
        unimplemented!()
    }

    /// Runs a module source. Not implemented yet; always panics.
    pub fn run_module(&self, module: &str) -> Result<Value, Error> {
        unimplemented!()
    }
}

#[test]
fn test_isolate() {
    // Construct an isolate, then wire up a global scope that refers back to it.
    let mut isolate_ref = IsolateRef::new(Isolate::new());
    let global_scope_ref = ScopeRef::new(Scope::new(isolate_ref.clone(), None));
    isolate_ref.borrow_mut().set_global_scope(global_scope_ref);
}
// TestEspeak checks that media.ToAudio() creates the MP3 and OGG files after creating // the WAV files, and that the WAV file can be removed by media.RemoveWAV() // This function also tests media.ImageToVideo() and sets the filename for other tests func TestEspeakToAudio(t *testing.T) { espwav := &EspeakSpeech{"This is some sample text for testing text to speech engines.", "en", "135", "m", "0", "medium", "50"} media, err := espwav.NewEspeakSpeech() if err != nil { t.Fatal("NewEspeakSpeech returned error:", err) } wavFile := dataPath + media.Filename + ".wav" t.Log(wavFile) err = media.ToAudio() if err != nil { t.Fatal("There was an error creating the audio files:", err) } filename := dataPath + media.Filename t.Log("filename =", filename+".mp3") if _, err = os.Stat(filename + ".mp3"); err != nil { t.Fatal("There was an error opening the audio files:", err) } t.Log("filename =", filename+".ogg") if _, err = os.Stat(filename + ".ogg"); err != nil { t.Fatal("There was an error opening the audio files:", err) } tempFile := tempPath + media.Filename ext := "png" if !copy(tempPath+"payload."+ext, tempFile+"."+ext) { t.Fatal("Could not copy "+tempPath+"payload.png to", tempFile) } if videoFilename, err = ImageToVideo(media.Filename, ext); err != nil { t.Fatal("There was an error in ImageToVideo()", err) } os.Remove(tempFile + "." + ext) os.Remove(tempFile) media.RemoveWAV() t.Log("filename =", filename+".wav") if _, err = os.Stat(filename + ".wav"); err == nil { t.Fatal("There was an error removing the WAV file:", err) } os.Remove(filename + ".mp3") os.Remove(filename + ".ogg") os.Remove(dataPath + videoFilename + ".webm") os.Remove(dataPath + videoFilename + ".mp4") }
<gh_stars>0
import {DisplayManager} from "./display_manager";
import {Config} from "./config";
import * as p5 from "p5";
import {global} from "./index";
import {AccuracyEvent} from "./accuracy_event";
import {AccuracyUtil} from "./accuracy_util";
import {AccuracyObserver} from "./accuracy_observer";
import {Drawable} from "./drawable";

// Draws a short-lived, star-shaped "flash" on a track whenever a note is
// hit, colored according to the accuracy rank of the hit.
export class AccuracyFeedbackFlash implements AccuracyObserver, Drawable {
    private config: Config;
    private displayManager: DisplayManager;
    private numTracks: number;
    private numColoredAccuracyRanks: number;
    // How long a single flash stays visible, in seconds.
    private static flashDurationInSeconds: number = 0.1;
    // RGBA colors, one per accuracy rank (index 0 = rank 1).
    private accuracyColors: [number, number, number, number][];
    // Most recent accuracy event seen on each track, indexed by track number.
    private lastEvents: AccuracyEvent[];

    constructor(config: Config, displayManager: DisplayManager, numTracks: number) {
        this.config = config;
        this.displayManager = displayManager;
        this.numTracks = numTracks;
        this.numColoredAccuracyRanks = AccuracyUtil.countDifferentHitAccuracies(this.config);
        this.accuracyColors = [
            [178, 94, 247, 180],
            [30, 217, 124, 160],
            [196, 199, 30, 140],
            [245, 213, 221, 120]
        ];
        // If the config defines more hit-accuracy ranks than preset colors,
        // pad the palette with random colors so indexing never runs off the end.
        while (this.numColoredAccuracyRanks > this.accuracyColors.length) {
            this.accuracyColors.push(
                [this.getRandomInt(255), this.getRandomInt(255), this.getRandomInt(255), 100]
            );
        }
        this.lastEvents = [];
    }

    // Uniform random integer in [0, max).
    private getRandomInt(max: number) {
        return Math.floor(Math.random() * Math.floor(max));
    }

    // AccuracyObserver: remember the latest event for the event's track.
    public update(accuracyEvent: AccuracyEvent) {
        this.lastEvents[accuracyEvent.trackNumber] = accuracyEvent;
    }

    // Drawable: draw any currently-active flash on every track.
    public draw(currentTimeInSeconds: number) {
        for (let trackNumber = 0; trackNumber < this.numTracks; trackNumber++) {
            this.drawFlashForTrack(trackNumber, currentTimeInSeconds);
        }
    }

    private drawFlashForTrack(trackNumber: number, currentTimeInSeconds: number) {
        let lastEvent: AccuracyEvent = this.lastEvents[trackNumber];
        if (this.isFlashHappening(currentTimeInSeconds, lastEvent)) {
            let centerX = this.displayManager.getNoteCenterX(trackNumber, this.numTracks);
            // NOTE(review): currentTimeInSeconds is passed for BOTH arguments
            // here; the first argument may be intended to be the note/event
            // time instead — confirm against DisplayManager.getNoteCenterY.
            let centerY = this.displayManager.getNoteCenterY(currentTimeInSeconds, currentTimeInSeconds);
            let flashColor: p5.Color = this.getFlashColor(lastEvent);
            let elapsedTimeInSeconds = this.getElapsedTimeInSeconds(currentTimeInSeconds, lastEvent);
            this.drawFlash(elapsedTimeInSeconds, centerX, centerY, flashColor);
        }
    }

    // A flash is active only if the track has a recorded event, that event
    // was a hit (not a miss), and it happened within flashDurationInSeconds.
    private isFlashHappening(currentTimeInSeconds: number, accuracyEvent: AccuracyEvent) {
        if (accuracyEvent === undefined) {
            return false;
        }
        if (!AccuracyUtil.eventIsAHit(accuracyEvent, this.config)) {
            return false;
        }
        let elapsedTimeInSeconds = this.getElapsedTimeInSeconds(currentTimeInSeconds, accuracyEvent);
        return elapsedTimeInSeconds <= AccuracyFeedbackFlash.flashDurationInSeconds;
    }

    private getElapsedTimeInSeconds(currentTimeInSeconds: number, accuracyEvent: AccuracyEvent) {
        return currentTimeInSeconds - accuracyEvent.timeInSeconds;
    }

    // Assumes symmetrical accuracy settings
    private getFlashColor(accuracyEvent: AccuracyEvent) {
        let accuracyRank = AccuracyUtil.getAccuracyRank(accuracyEvent, this.config);
        // Ranks are 1-based; the color table is 0-based.
        let colorValues = this.accuracyColors[accuracyRank - 1];
        let p: p5 = global.p5Scene.sketchInstance;
        return p.color(colorValues[0], colorValues[1], colorValues[2], colorValues[3]);
    }

    private drawFlash(elapsedTimeInSeconds: number, centerX: number, centerY: number, color: p5.Color) {
        let p: p5 = global.p5Scene.sketchInstance;
        let flashSize: number = this.getFlashSize(elapsedTimeInSeconds, AccuracyFeedbackFlash.flashDurationInSeconds);
        p.push();
        p.translate(centerX, centerY);
        p.fill(color);
        // p.fill(255, 255, 255, 150);
        p.noStroke();
        // A 4-point star whose inner radius is 40% of the outer radius.
        this.drawStar(p, 0, 0, flashSize, flashSize * 0.4, 4);
        p.pop();
    }

    // The flash grows linearly from size 0 up to the configured note size
    // over the flash duration.
    private getFlashSize(elapsedTimeInSeconds: number, flashDurationInSeconds: number) {
        let flashCompletionRatio = elapsedTimeInSeconds / flashDurationInSeconds;
        let minSize = 0;
        let maxSize = this.config.noteSize;
        return this.interpolate(minSize, maxSize, flashCompletionRatio);
    }

    // Linear interpolation of ratio into [minValue, maxValue], clamped at
    // both ends.
    private interpolate(minValue: number, maxValue: number, ratio: number) {
        if (ratio <= 0) {
            return minValue;
        } else if (ratio > 0 && ratio < 1) {
            return minValue + (maxValue - minValue) * ratio;
        } else {
            return maxValue;
        }
    }

    // Draws an npoints-pointed star centered at (centerX, centerY):
    // radius1 is the outer (tip) radius, radius2 the inner radius.
    private drawStar(p: p5, centerX: number, centerY: number, radius1: number, radius2: number, npoints: number) {
        p.push();
        p.angleMode(p.RADIANS);
        let angle = p.TWO_PI / npoints;
        let halfAngle = angle / 2.0;
        p.beginShape();
        for (let a = 0; a < p.TWO_PI; a += angle) {
            let sx = centerX + p.cos(a) * radius2;
            let sy = centerY + p.sin(a) * radius2;
            p.vertex(sx, sy);
            sx = centerX + p.cos(a + halfAngle) * radius1;
            sy = centerY + p.sin(a + halfAngle) * radius1;
            p.vertex(sx, sy);
        }
        p.endShape(p.CLOSE);
        p.pop();
    }
}
Army Staff Sgt. Jackelyn Walker, right, is the aggressor as she fights Pfc. Gregory Langarica in the first round of the bantamweight championship of the finals of the Fort Hood Combative Championships in Killeen, Texas, February 16, 2012. Langarica wins the fight as the fight was called off in the second round. FORT HOOD, Texas — Whap. Whap. Army Staff Sgt. Jackelyn Walker is snapping left jabs at Pfc. Greg Langarica’s head. She doesn’t like his smirk. Whap. Whap. She lunges for his midsection, slams him down and locks him in a chokehold. Langarica’s face goes crimson. His smirk is gone. The 1,000 or so spectators in the Army gym howl with glee. The Army still bars women from fighting in combat units. But some women are trying to break that barrier far from the front lines — by battling male soldiers in chain-link cages against a backdrop of strobe lights, thumping music and swirling smoke. The slugfests resemble ultimate fighting, a staple of pay-per-view television, right down to the black wire cages and throat-constricting holds with names like “the guillotine” and “the rear naked choke.” The Army says the eight-sided enclosure simulates fighting in a small room and helps develop skills that soldiers sometimes need to subdue foes rather than kill them. The brawls are an outgrowth of mixed martial arts training that began in all-male Ranger units in the mid-1990s and soon spread to the rest of the Army. Tournaments were started on mats and in boxing rings at bases around the country. Elaborately staged cage fights — including some pitting women against men — started in 2008, in part because commanders realized they helped with recruiting. In the most recent cage-fighting competition, more than 300 men and 25 women — up from five last year — competed over four days in February at Fort Hood in Texas. One woman made it to the finals. But at least three female fighters were carried out on stretchers. 
Others limped to a green canvas tent that served as a first-aid station. One fighter burst into tears, upset that a referee had halted her fight before she felt beaten. Unlike participants in Army boxing matches, cage fighters wear open-fingered gloves with thin padding and no headgear. They mostly fight barefoot, wearing camouflage fatigues or T-shirts. Most of the women fight in the lightest weight classes: bantamweight and flyweight. To help balance the odds, they are allowed to outweigh men in the same class by 10 pounds. The early rounds of the Fort Hood tournament were fought on padded mats, not in cages, and no punches or slaps were allowed until the final two days of the competition. The thinking is that less-skilled fighters would be eliminated by then. Pfc. Yennyfer Usuga, a 26-year-old immigrant from Colombia, joined the Army a year ago and started serious fight training only in January. Quiet and seemingly frail at 116 pounds, she seemed out of place among male fighters with bulging biceps. But she won her first few fights, which by luck of the draw were all against other women. Between bouts, Usuga changed from camouflage fatigues into capri pants and shook her shoulder-length reddish brown hair out of an Army-regulation bun. “She’s very feminine,” said her coach, Sgt. Dwan West. Her opponent in the semifinal round was the wiry, heavily tattooed Langarica. Standing 5-foot-5 and weighing less than 115 pounds, he had boxed and trained in jujitsu for years and had no qualms about fighting a woman. Usuga came out swinging wildly and tried kneeing Langarica in the groin. She missed. The artilleryman pummeled her with body shots, threw her to the mat and staggered her with a bare-handed slap to the cheek. She stumbled off in defeat, her eyes dull. Langarica celebrated with a grin and a victory dance, scissoring his feet back and forth like the young Muhammad Ali, prompting a finger-pointing lecture from the referee. 
The Army remains hyper-macho, but the wars of the last decade showed that women faced considerable danger even in the support jobs to which they were limited. In Iraq, 109 female service members have died, mostly in ambushes and firefights, and 29 have died in Afghanistan, even though they were truck drivers, civil affairs advisers or in other ostensibly noncombat jobs. As a result, many male soldiers — as well as women — now consider the ban on women in combat roles to be outdated. Fighting a woman in a cage is not necessarily a picnic for male soldiers. They face ridicule if they lose and little glory if they win. The women’s motivations vary. “I just like to fight,” said Spc. Amber Sellers. The bouts “teach us to react in the moment without a weapon” and not to back down, said Pfc. Vanessa Edwards. When her mother found out she was fighting men, “she told me to kick their ass,” Edwards said. Spc. Dariana Chesser, 24, decided to join the cage fights last year after serving on a security squad for a senior officer in Afghanistan. “I want to transfer to a combat job,” she said. “I think this helps us prove we are worthy of fighting in combat with men.” Chesser was considered an early favorite among the female fighters. In the month before the tournament, she dropped 38 pounds, down from nearly 170, thanks to twice-a-day training sessions and long sessions in the sauna. She figured she stood a better chance as a flyweight, which has a 135-pound limit for women. But a day before her first fight, dehydrated and weak, she was rushed to a hospital and hooked to an intravenous drip. The next morning, she charged out of her corner and flipped Spc. Alex Seaton. But he quickly snared her in a chokehold. She tapped him, ending the match. “I knew I had her good,” Seaton said. “She felt weak.” A few hours later, Chesser was in the stands, glowering. “We don’t have instant adrenaline like males do,” she said. 
Although she had lost her shot at the title, she fought again that afternoon in a consolation bout. This time, she was unstoppable. Chesser tossed Spc. Gary Boyd on his back, grabbed his legs and spun him like a top. The mostly male crowd erupted in shouts of delight. When Boyd scrambled to his feet, Chesser leaped on him, twisting his collar to cut off his circulation in a move called a “blood choke.” Two taps on the mat signaled he was done. Chesser thrust her fists in the air. Walker, a 33-year-old Oklahoma native who served three tours in Iraq as a forklift driver and supervisor, was the only woman to make the final round. She is 5-foot-2, lithe and strong. Her semifinal opponent, a male sergeant, was disqualified for a vicious elbow jab to her ribs. After nearly five minutes face-down on the mat, she got up slowly. “I tried not to show anything, but it freaking hurt,” she said. The next day, she was back for the bantamweight championship match against Langarica. The fighters made their way to the cage, both punching the air. Pounding music and billowing smoke lent a touch of Las Vegas glitz. Cheering soldiers, families and civilians packed metal chairs and fold-out bleachers around the cage. As the fighters pranced and paraded before the first round, Walker noticed Langarica’s smirk. “I was mad because he was smiling, like, ‘Oh, she’s going to be easy,’ ” she said later. When the horn blared, she charged out of her corner and unleashed a flurry of jabs that sent him backpedaling. She slammed him to the mat and straddled him, clamping on a chokehold. Langarica thrashed and kicked, finally escaping. The crowd roared for more. “Come on, Sgt. Walker!” yelled an Army colonel dressed in fatigues. “You can do it!” In the second round, Langarica regained the momentum. He started landing blows that made Walker wince. Her energy flagged. She leaned against the cage and finally dropped to the mat. She was carried out on a stretcher, her eyes rolled back in her head.
“I wanted to be the first female champion on the base,” she said after she was released from the base hospital. “We can be just as tough as the guys. We can do it.” Langarica was magnanimous in victory. He hadn’t beaten a woman, he said. “It was a warrior.”
/**
 * Handles clicks on the exit button by delegating to the presenter,
 * which carries out the actual exit.
 */
private class ExitButtonActionHandler implements ActionListener {

    /**
     * Invoked by Swing when the exit button is clicked.
     *
     * @param a the click event (not used beyond triggering the exit)
     */
    @Override
    public void actionPerformed(ActionEvent a) {
        presenter.Exit();
    }
}
// NewDefault Create a new MTA server with a // socket protocol implementation. func NewDefault(c Config, h Handler) *DefaultMta { mta := &DefaultMta{ mta: New(c, h), } if mta == nil { return nil } return mta }
<gh_stars>1-10 /// <reference path="./selectFieldOptionMetadataModel.ts" /> /// <reference path="../../../models/core/fieldMetadataModel.ts" /> module Models { 'use strict'; export class SelectFieldMetadata extends FieldMetadata { constructor() { super('Select'); } public Values:Models.SelectFieldOptionMetadata[]; public DefaultValue:Models.SelectFieldOptionMetadata; //If null, then "Please select value ..." is added //Mapping public FieldSpecialProperties:string[] = []; public MapAdditionalProperties(entity:Models.Entity, mapperService:Services.IEntityModelMapperService):void { //Map from entity (when loading from server) to metadata and vice-versa (when saving to server) if (this.Values) { entity.Data.Values = this.Values.map(function (item:Models.SelectFieldOptionMetadata):string { return item.Text; }); } else { this.Values = mapperService.GetSelectFieldOptionsFromArrayProperty(entity.Data, 'Values'); } if (this.DefaultValue) { entity.Data.DefaultValue = this.DefaultValue.Text; } else { this.DefaultValue = mapperService.GetSelectFieldOptionFromStringProperty(entity.Data, 'DefaultValue'); } } } }