Managing Melaleuca (Melaleuca quinquenervia) in the Everglades Until recently, melaleuca was spreading faster than it could be controlled throughout the boundaries of the South Florida Water Management District (District). Although melaleuca is a difficult species to eradicate, District efforts, along with those of other governmental agencies and private groups, appear to be containing its spread within the Everglades Water Conservation Areas (WCAs) and the marsh of Lake Okeechobee. Melaleuca has been completely cleared from WCA-2A, -3B, and -3A, south of Alligator Alley. These areas are now under maintenance control. Maintenance control means applying management techniques on a continuous basis to keep an invasive plant population at its lowest feasible level. Today, the melaleuca infestation is no longer increasing; in many areas, it is being reduced. Preliminary results from the latest District survey indicate that melaleuca infestation has decreased considerably in South Florida, especially on public lands. The intent of this paper is to provide an assessment of the progress made by the District's melaleuca control program over the past 7 yr.
/*
 *  Copyright 2015-2020 WebPKI.org (http://webpki.org).
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      https://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 *
 */
package org.webpki.saturn.acquirer;

import io.interbanking.IBRequest;
import io.interbanking.IBResponse;

import java.io.IOException;

import org.webpki.json.JSONObjectReader;
import org.webpki.json.JSONObjectWriter;

import org.webpki.saturn.common.AccountDataDecoder;
import org.webpki.saturn.common.PaymentRequestDecoder;
import org.webpki.saturn.common.UrlHolder;
import org.webpki.saturn.common.TransactionRequestDecoder;
import org.webpki.saturn.common.TransactionResponseDecoder;
import org.webpki.saturn.common.TransactionResponseEncoder;
import org.webpki.saturn.common.PayeeCoreProperties;

import com.supercard.SupercardAccountDataDecoder;

////////////////////////////////////////////////////////////////////////////////////////////////
// This is the core Acquirer (Card-Processor) Saturn basic mode payment authorization servlet //
////////////////////////////////////////////////////////////////////////////////////////////////
public class TransactionServlet extends ProcessingBaseServlet {

    private static final long serialVersionUID = 1L;

    JSONObjectWriter processCall(UrlHolder urlHolder, JSONObjectReader providerRequest) throws Exception {

        // Decode and finalize the cardpay request
        TransactionRequestDecoder transactionRequest =
                new TransactionRequestDecoder(providerRequest, true);
        PaymentRequestDecoder paymentRequest = transactionRequest.getPaymentRequest();

        // Verify that we understand the payee payment method
        AccountDataDecoder payeeReceiveAccount = transactionRequest
                .getAuthorizationResponse()
                .getAuthorizationRequest()
                .getPayeeReceiveAccount(AcquirerService.payeeAccountTypes);

        // Verify that the payer's (user) bank is known
        transactionRequest.verifyPayerBank(AcquirerService.paymentRoot);

        // Verify that the payee (merchant) is one of our customers
        String payeeAuthorityUrl = transactionRequest
                .getAuthorizationResponse()
                .getAuthorizationRequest()
                .getPayeeAuthorityUrl();
        PayeeCoreProperties payeeCoreProperties = AcquirerService.payeeAccountDb.get(payeeAuthorityUrl);
        if (payeeCoreProperties == null) {
            throw new IOException("Unknown merchant: " + payeeAuthorityUrl);
        }
        payeeCoreProperties.verify(transactionRequest.getSignatureDecoder());

        SupercardAccountDataDecoder accountData =
                getAccountData(transactionRequest.getAuthorizationResponse());

        boolean testMode = transactionRequest.getTestMode();
        logger.info((testMode ? "TEST ONLY: " : "") +
                    "Acquiring for Account=" + accountData.logLine() +
                    ", Amount=" + transactionRequest.getAmount().toString() +
                    " " + paymentRequest.getCurrency().toString());

        String optionalLogData = null;
        TransactionResponseDecoder.ERROR transactionError = null;

        // Here we are supposed to talk to the card payment network....
        IBResponse ibResponse = IBRequest.perform(AcquirerService.payerInterbankUrl,
                                                  IBRequest.Operations.CREDIT_CARD_TRANSACT,
                                                  accountData.getAccountId(),
                                                  transactionRequest.getAuthorizationResponse().getReferenceId(),
                                                  transactionRequest.getAmount(),
                                                  paymentRequest.getCurrency().toString(),
                                                  paymentRequest.getPayeeCommonName(),
                                                  paymentRequest.getReferenceId(),
                                                  payeeReceiveAccount.getAccountId(),
                                                  testMode,
                                                  AcquirerService.acquirerKey);
        if (!testMode) {
            optionalLogData = "Payer interbanking ref: " + ibResponse.getOurReference();
        }

        // It appears that we succeeded
        AcquirerService.transactionCount++;

        return TransactionResponseEncoder.encode(transactionRequest,
                                                 transactionError,
                                                 getReferenceId(),
                                                 optionalLogData,
                                                 AcquirerService.acquirerKey);
    }
}
import idc


def undname(name):
    if name.startswith("?"):
        name = _demangle(name)
    elif name.startswith("_") or name.startswith("@"):
        name = name.rsplit('@', 1)[0][1:]
    return name


def _demangle(name, short=True):
    dtype = idc.INF_LONG_DN
    if short:
        dtype = idc.INF_SHORT_DN
    tmp = idc.Demangle(name, idc.GetLongPrm(dtype))
    if tmp:
        name = tmp
    name = name.replace('__', '::')
    return name
package uristqwerty.CraftGuide.api;

/**
 * Prototype for a more convenient recipe API.
 */
public interface RecipeTemplateBuilder {
	public RecipeTemplateBuilder nextColumn();
	public RecipeTemplateBuilder nextColumn(int gap);
	public RecipeTemplateBuilder shapelessItemGrid(int width, int height);
	public RecipeTemplateBuilder shapedItemGrid(int width, int height);
	public RecipeTemplateBuilder item();
	public RecipeTemplateBuilder outputItem();
	public RecipeTemplateBuilder machineItem();
	public RecipeTemplateBuilder chanceItem();
	public RecipeTemplateBuilder liquid();
	public RecipeTemplateBuilder setColumnAlign(VerticalAlign align);
	public RecipeTemplateBuilder setItemAlign(HorizontalAlign align);
	public RecipeTemplateBuilder nextSlotType(TemplateBuilderSlotType type);
	public ConstructedRecipeTemplate finishTemplate();

	public static enum TemplateBuilderSlotType {
		INPUT, OUTPUT, MACHINE, DECORATIVE;

		public boolean drawBackground() {
			return this.equals(INPUT) || this.equals(OUTPUT);
		}

		public SlotType toSlotType() {
			return this.equals(INPUT)?   SlotType.INPUT_SLOT :
			       this.equals(OUTPUT)?  SlotType.OUTPUT_SLOT :
			       this.equals(MACHINE)? SlotType.MACHINE_SLOT :
			                             SlotType.DISPLAY_SLOT;
		}
	}

	public static enum VerticalAlign {
		TOP, CENTER, BOTTOM
	}

	public static enum HorizontalAlign {
		LEFT, CENTER, RIGHT
	}
}
British Airways (BA) has admitted 185,000 additional sets of personal data may have been stolen during a cyber attack last month. In a short regulatory statement, the airline said an ongoing investigation into the attack had identified breaches not previously uncovered. The announcement comes as a fresh blow to BA after it shocked customers last month by revealing more than 380,000 payments had been compromised over a two week period in August and September. Now, two more groups of customers who were not originally notified have also been warned that their details may have been stolen.
Effects of Verbal and Nonverbal Communication of Affection on Avoidantly Attached Partners' Emotions and Message Receptiveness Research on adult attachment in romantic relationships has focused on the negative outcomes that avoidantly attached individuals face. The present research uses observational research methods to determine whether there are specific ways of communicating affection that might help avoidantly attached people reap similar levels of rewards from affectionate communication as those who are more secure. We combined three samples (N_total = 280 couples, 560 participants) in which partners took turns describing a time they felt strong love for their partner, and coded their expressions for cues of verbal affection (i.e., emotion-laden words) and nonverbal affection (i.e., behavioral expressiveness). Higher levels of the speaker's nonverbal affection were associated with stronger positive emotion and behavioral receptiveness (i.e., appearing engaged) for listeners higher in attachment avoidance. Altogether, we provide evidence that avoidantly attached individuals may experience positive outcomes from affectionate exchanges when the communication style is tailored to their unique needs.
Precise QCD predictions for the production of a Z boson in association with a hadronic jet We compute the cross section and differential distributions for the production of a Z boson in association with a hadronic jet to next-to-next-to-leading order (NNLO) in perturbative QCD, including the leptonic decay of the Z boson. We present numerical results for the transverse momentum and rapidity distributions of both the Z boson and the associated jet at the LHC. We find that the NNLO corrections increase the next-to-leading order (NLO) predictions by approximately 1% and significantly reduce the scale variation uncertainty. PACS numbers: 12.38Bx The Drell-Yan production of lepton pairs is a benchmark process at hadron colliders like the Large Hadron Collider (LHC). The production of Z bosons (or off-shell photons) with subsequent leptonic decays has both a clean and readily identifiable signature and a large event rate. It is a key process for precision measurements of electroweak (EW) parameters, and it also allows various aspects of the strong interaction to be probed, including parton distribution functions (PDFs), the strong coupling constant α_s, and the behaviour of processes involving multiple scales. It is moreover a key ingredient in calibrating several parts of the detector (including the jet energy scale) and can potentially be used to measure the luminosity of the collider. At the LHC, the Z boson is almost always produced together with additional QCD radiation, thereby providing a perfect testing ground for our theoretical understanding of both strong and electroweak physics in a hadronic environment. Together, the combination of precise experimental data and reliable theoretical predictions enables a variety of precision measurements at the LHC. The importance of the neutral current Drell-Yan process is also reflected in the effort to make the theoretical predictions as precise as possible. For inclusive Z production, theoretical predictions at per-cent level accuracy are available. To attain this level of precision, a variety of higher-order corrections in QCD and the EW theory had to be considered. The cross section for Z production is known at next-to-next-to-leading order accuracy (i.e. at two loops) with respect to QCD corrections. Corrections beyond this order have been studied in the soft-virtual approximation. The NNLO QCD corrections have been combined with a resummation of next-to-next-to-leading logarithmic effects, which is necessary to predict the transverse momentum distribution of the Z boson at small p_T, and matched with parton showers. In the electroweak theory, the next-to-leading order corrections and the mixed QCD-EW corrections also contribute to the precise description of this process. Drell-Yan production in association with hadronic jets has also been intensively studied. The NLO QCD corrections for Z + 1 jet, Z + 2 jets, Z + 3 jets and Z + 4 jets are known, while the NLO EW corrections for Z + 1 jet and Z + 2 jets have also been derived. In this letter, we report on the calculation of the NNLO contributions to the neutral-current Drell-Yan process in which the dilepton pair is produced in association with a hard, visible hadronic jet, pp → Z/γ* + jet → ℓ⁺ℓ⁻ + jet + X. Our results are obtained in the form of a parton-level event generator that provides the corrections in a fully differential form, including the Z/γ* boson decay to two charged leptons.
The final state of the hard-scattering process is completely reconstructable, and the application of an invariant mass cut on the lepton pair can ensure that the process is dominated by resonant Z bosons. The NNLO corrections to Z boson + jet production in hadronic collisions receive contributions from three types of parton-level processes: (a) the two-loop corrections to Z boson-plus-three-parton processes, (b) the one-loop corrections to Z boson-plus-four-parton processes and (c) the tree-level Z boson-plus-five-parton processes. Figure 1 shows representative Feynman diagrams for each of the partonic multiplicities. The ultraviolet renormalized matrix elements for these processes are integrated over the final-state phase space appropriate to Z boson + jet final states. All three types of contributions are infrared-divergent and only their sum is finite. While infrared divergences from the virtual corrections are explicit in the one-loop and two-loop matrix elements, divergences from unresolved real radiation become explicit only after phase space integration. The divergences are regulated using dimensional regularization, and a variety of methods have been used for their extraction from the real radiation contributions. All these methods are based on the isolation of the divergent configurations, which are then integrated over the phase space and added to the virtual corrections to yield a finite result: sector decomposition, sector-improved residue subtraction, antenna subtraction, q_T-subtraction and N-jettiness subtraction have all been applied successfully in the calculation of NNLO corrections for a range of LHC processes. In this calculation we employ the antenna subtraction method, in which the real radiation subtraction terms are constructed from antenna functions. These antenna functions capture all the unresolved radiation emitted between a pair of hard radiator partons. For hadron-collider observables, either hard radiator can be in the initial or final state, and all unintegrated and integrated antenna functions were derived in Refs. The cross section corresponding to an initial partonic state ij is given by dσ_{ij,NNLO} = ∫_{Φ_{N+2}} [dσ^{RR}_{ij,NNLO} − dσ^{S}_{ij,NNLO}] + ∫_{Φ_{N+1}} [dσ^{RV}_{ij,NNLO} − dσ^{T}_{ij,NNLO}] + ∫_{Φ_{N}} [dσ^{VV}_{ij,NNLO} − dσ^{U}_{ij,NNLO}], where each of the square brackets is finite and well behaved in the infrared singular regions. The construction of the subtraction terms dσ^{S,T,U}_{ij,NNLO} follows closely the procedure established for jet production and Higgs + jet production. Powerful checks of our formalism are that (a) the poles in the dimensional regularization parameter ε cancel analytically and (b) that the subtraction terms accurately reproduce the singularity structure of the real radiation matrix elements. Using the antenna subtraction method, we have derived the corresponding subtraction terms for all partonic initial states and all color contributions for Z boson-plus-jet production through to NNLO and implemented them in a parton-level event generator. With this program, we can compute any infrared safe observable related to Z + jet final states to NNLO accuracy. The Z boson decay to two charged leptons is included, such that realistic event selection cuts on the leptonic final state can be applied. Renormalization and factorization scales can be chosen (dynamically) on an event-by-event basis. For our numerical computations, we use the NNPDF2.3 parton distribution functions with the corresponding value of α_s(M_Z) = 0.118 at NNLO, and M_Z = 91.1876 GeV.
Note that we systematically use the same set of PDFs and the same value of α_s(M_Z) for the LO, NLO and NNLO predictions. The factorization and renormalization scales are chosen to be μ ≡ μ_F = μ_R = M_Z, with a theoretical uncertainty estimated by varying the scale choice by a factor in the range [1/2, 2]. We require that the leptons have pseudorapidity |η| < 5 and that the dilepton invariant mass is close to the Z boson mass, 80 GeV < m_{ll} < 100 GeV. Jets are reconstructed using the anti-k_T algorithm with R = 0.5 and are required to have p_T^jet > 30 GeV and |y^jet| < 3. With these cuts, we find that the inclusive NNLO corrections amount to a 1% increase on the NLO cross section. More information on the impact of the NNLO QCD corrections can be gained from differential distributions in the kinematical variables of the Z boson and the jet. In the kinematical distributions and ratio plots, the error band describes the scale variation envelope as described above, where the denominator in the ratio plots is evaluated at the fixed central scale, such that the band only reflects the variation of the numerator. Figure 2(a) shows the inclusive leading-jet transverse energy distribution in 10 GeV bins. Due to the inclusiveness of the observable, events with two or three jets with p_T^jet > 30 GeV and |y^jet| < 3 are also included. The relative corrections are further exposed in Figure 2(b), where we show the ratio K = dσ_{(N)NLO}(μ)/dσ_{(N)LO}(μ = M_Z). The band shows the effect of varying μ in the range [M_Z/2, 2M_Z] in the numerator while keeping μ = M_Z in the denominator. For our set of cuts and input parameters, we see that the NLO corrections increase the cross section by between 30% and 70%. At low transverse momentum the NNLO corrections are a positive correction of approximately 1%. The variation with the unphysical scales is significantly reduced as we move from NLO to NNLO. The rapidity distribution of the leading jet is displayed in Figure 3. Note that the distribution is restricted by the requirement that |y^jet| < 3. We see that the NLO corrections are typically 35%-40% and relatively flat. The NNLO corrections increase the cross section by approximately 1% over the whole range of y^jet with a significantly reduced scale dependence. The Z boson p_T distribution in inclusive Z + jet production is shown in Figure 4, where we observe an interesting structure around p_T^Z ∼ 30 GeV. This behaviour arises from the fact that the Z boson is recoiling against a complicated hadronic final state that contains at least one jet with p_T^jet > 30 GeV. For this set of cuts, the leading order process is constrained to have p_T^Z > 30 GeV, while higher order real radiation corrections lift this limitation, since extra partonic radiation can also balance the transverse momentum of the leading jet. This Sudakov shoulder phenomenon is also observed in H + jet production; it is well understood and leads to large higher order corrections, which require logarithmic resummation. Nevertheless, the NNLO corrections tend to stabilise the NLO result, and in fact simply represent an NLO correction to the p_T^Z distribution for Z + jet events in this region. At larger transverse momenta, the NNLO corrections increase the prediction by approximately 1%. Figure 5 shows the rapidity distribution of the Z boson.
The NLO and NNLO corrections are largest in the forward/backward regions, where the phase space is enlarged by the possibility that the hadronic radiation partially balances, leading to a smaller Z-boson p_T. In these regions, one of the parton momentum fractions is reaching a maximal value. In the central region, the NNLO corrections are very small, with a reduced scale dependence. In the differential distributions we observe that the corrections are not always uniform, implying that a rescaling of lower-order predictions is insufficient for precision applications. The need for using the fully differential higher order predictions can be understood, for example, in the extraction of parton distribution functions from Z + jet production. At leading order, the momentum fractions of the incoming partons are completely fixed by the transverse momenta and rapidities of the final state particles. At higher orders, the real radiation spoils the leading order kinematics, turning this relation into an inequality, with the equality restored only for leading order kinematics (p_T^Z = p_T^jet). The relevant x ranges probed by Z boson-plus-jet production are thus determined by the transverse momentum and rapidity distributions of the Z boson and the jet. For our cuts, the smallest momentum fractions probed are x ∼ 8 × 10⁻³, and smaller values of x can be attained by enlarging the rapidity interval or by lowering the transverse momentum cut. In this manuscript we have presented the complete NNLO QCD calculation of Z boson production in association with a jet in hadronic collisions including all partonic subprocesses. This process is measured experimentally to high precision and is an important ingredient to a variety of precision studies of Standard Model parameters and derived quantities, as well as a key element in the LHC detector calibration. We have achieved this using the antenna subtraction method that has been successfully applied to other processes at the LHC. For all of the observables considered here, we observed a very significant reduction of the respective uncertainties in the theory prediction due to variations of the factorization and renormalization scales, with a residual NNLO scale uncertainty of around 1% on the normalization of the distributions. Our calculation will be a crucial tool for precision studies of Z boson + jet production in the upcoming data taking periods at the CERN LHC. This research was supported in part by the Swiss National Science Foundation (SNF) under contracts 200020-149517 and CRSII2-141847, in part by the UK Science and Technology Facilities Council as well as by the Research Executive Agency (REA) of the European Union under the Grant Agreement PITN-GA-2012-316704 ("HiggsTools"), and the ERC Advanced Grant MC@NNLO. Note added: After this paper was initially submitted to Physical Review Letters, a second calculation (employing a different subtraction scheme) of Z + jet production at NNLO precision was presented and published. In coordination with the authors of that calculation, we performed an in-depth comparison by running our code with their settings (cuts, parton distributions, scale choice). This comparison uncovered an error in the numerical code used in that calculation, which alters its published results. After correction of this error, their code agrees with our results.
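As an aside for readers reproducing ratio plots of this kind, the K-factor and scale-variation band described above amount to simple arithmetic once binned differential cross sections are available. The sketch below is purely illustrative and not part of the calculation: the cross-section values are placeholders, and the snippet only shows how the numerator is varied while the denominator stays at the central scale.

# Illustrative only: building a K-factor and its scale-variation envelope for one bin.
# The differential cross-section values below are placeholders, not physics results.
dsigma_nnlo = {"mu=MZ/2": 1.23, "mu=MZ": 1.20, "mu=2MZ": 1.18}  # numerator: scale varied
dsigma_nlo_central = 1.19                                        # denominator: fixed mu = MZ

k_factors = {scale: value / dsigma_nlo_central for scale, value in dsigma_nnlo.items()}
k_central = k_factors["mu=MZ"]
envelope = (min(k_factors.values()), max(k_factors.values()))    # band shown in the ratio plot

print(f"K = {k_central:.3f}, scale-variation band = [{envelope[0]:.3f}, {envelope[1]:.3f}]")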
// Render same as .RenderWithStatus but with the status set to iris.StatusOK (200) if no previous status exists;
// it builds up the response from the specified template or a serialize engine.
// Note: the options "gzip" and "charset" are built-in support by Iris, so you can pass these on any template engine or serialize engine.
//
// Look: .ViewData | .MustRender
//       .ViewLayout | .RenderWithStatus also.
//
// Examples: https://github.com/kataras/iris/tree/v6/_examples/intermediate/view/
func (ctx *Context) Render(name string, binding interface{}, options ...map[string]interface{}) error {
	errCode := ctx.ResponseWriter.StatusCode()
	if errCode <= 0 {
		errCode = StatusOK
	}
	return ctx.RenderWithStatus(errCode, name, binding, options...)
}
Reflecting on South African Psychology: Published research, relevance, and social issues As South Africa prepared to host the 30th International Congress of Psychology in 2012, a call was made to reflect on the strengths of and challenges facing contemporary South African Psychology. This article presents our response to our brief to focus on social issues by presenting the results of a situational analysis of South African Psychology over the last 5 years and comparing this corpus of data to a similar analysis reported in Macleod. Articles appearing in the South African Journal of Psychology and abstracts in PsycINFO with the keyword South Africa over a 5.5-year period were analysed. The content of 243 South African Journal of Psychology articles and 1986 PsycINFO abstracts was analysed using the codes developed by Macleod. Results indicate an increase in the number of articles; a reduction in the percentage of articles using quantitative methodologies and hard-science theoretical frameworks (particularly in the South African Journal of Psychology); and an increase in qualitative, theoretical, and methodological articles, and in articles using systems-oriented theory (particularly in the South African Journal of Psychology). Traditional topics of assessment, stress, and psychopathology continue to dominate, with social issues, such as housing, land reform, development programmes, water resources, and socio-economic inequities, being largely ignored. Most research continues to be conducted in Gauteng, KwaZulu-Natal, and the Western Cape, predominantly with adult, urban-based, middle-class participants, sourced mainly from universities, hospitals or clinics, and schools. Collaborations or comparisons with other African, Asian, South American, and Middle Eastern countries have decreased. While the analysis presented in this paper is limited by its exclusion of books, theses, research reports, and monographs, it shows that in published research there are some positive trends and some disappointments. The limited number of social issues featuring in published research, the under-representation of certain sectors of the population as participants, and the decrease in collaboration with, or comparison to, countries from the global South represent challenges that require systematic attention.
0 Parents fight to have diseases added to medical marijuana coverage ATLANTA - The parents of a Cobb County boy with a rare disease are part of a push to get Gov. Nathan Deal to expand the state’s medical marijuana law and allow the production of cannabis oil in the state. Right now it's legal for some patients to possess cannabis oil but they can't get access to it in Georgia. The pleas from Adra and Brian Underwood on behalf of their 1-year-old son Reid did little to move commission members on the growing of medical marijuana in Georgia. “His pain is beyond comprehension for you and me,” Brian Underwood told the medical marijuana commission Wednesday. At first glance, Reid Underwood looks like he has a sunburn. But he suffers from a rare skin disease that is so severe if his parents hold him they tear off his skin. [Special Section: Medical Marijuana in Georgia] “Adra pours bleach and water to his raw skin as this is one of the only ways to clean the wounds,” Brian Underwood said. Channel 2’s Lori Geary was there as the Underwoods testified before state leaders not only for their child's condition to be added to the cannabis oil list, but to also allow the growing of it so they would have access. “Breaking the law to bring back the oil to this state is unacceptable,” Underwood said. “I have grave concerns about that,” Deal said. Last year, Deal signed the law that allows patients with certain conditions to possess cannabis oil but patients must travel out of state to get it, breaking federal law. [READ: Georgia begins issuing medical marijuana cards] But Deal sides with law enforcement on the growing issue. “I think it's enough for us to be very, very cautious and to take our time and now's not the time to expand beyond where we are,” Deal said. “I'm not going to pick a fight with the governor because I’ll lose in that case,” said Republican state Rep. Allen Peake. Peake led the fight and won to legalize medical marijuana in Georgia for patients with certain conditions. He says he'll still push to expand the law. “What we can do is make a compelling argument of how we can maximize the health benefits for citizens in our state by minimizing the public safety risk law enforcement have,” Peake said. [READ: Channel 2 tours Colorado lab that creates medical marijuana oil] Geary followed the Underwoods to Deal's office where they hand delivered a letter hoping to change his mind. For now, they'll keep Reid on powerful narcotics to try and ease the pain. “It is heartbreaking as a parent to see and even more heartbreaking to be a parent that tortures your child,” Underwood said. The commission voted 11-5 against the growing of medical marijuana Wednesday. They did agree to add other conditions to the cannabis oil list. The recommendations will now go to Governor Deal. Rep. Peake said he'll still introduce a bill to grow medical marijuana in Georgia under tight restrictions.
package com.kehua.energy.monitor.app.business.local.setting.standard;

import com.kehua.energy.monitor.app.model.entity.Standard;

import java.util.List;

import me.walten.fastgo.base.mvp.BasePresenter;
import me.walten.fastgo.base.mvp.BaseView;

public interface StandardContract {

    interface View extends BaseView {
        void onSetupData(List<Standard> data);
    }

    abstract class Presenter extends BasePresenter<View> {
        public abstract void setupData();
    }
}
Design aspects of reconfigurable manufacturing cells as building blocks of flexible manufacturing systems Preliminary results and some practical examples of cell configuration, cell control design, reconfiguration, and redesign for flexible manufacturing systems performed by a hybrid expert system are given. The functions of configuration and reconfiguration are derived as the key sequences to be performed with advanced, intelligent control. The CS-PROLOG-based expert system, called ALL-EX, appears to be an effective tool to tackle this problem. An important feature of the expert system, the integrated simulation module, serves as a verification facility in the selection of an adequate configuration. Experimental results are presented.
from LPBv2.client import ReadyCheck
from .mock_http_connection import MockHTTPConnection
from .mock_ready_check_event import ready_check_event
import pytest


@pytest.fixture
def ready_check():
    return ReadyCheck(connection=MockHTTPConnection())


def test_ready_check_init(ready_check):
    assert isinstance(ready_check.http, MockHTTPConnection)


@pytest.mark.asyncio
async def test_ready_check_accept(ready_check):
    await ready_check.accept(ready_check_event)
    assert ready_check.http.endpoint[0] == "/lol-matchmaking/v1/ready-check/accept"
// Note that it makes hasNext() return false
private void clear() {
    schemeIndex = 0;
    schemeSize = 0;
    sheetIter = null;
}
import React from 'react';
import { injectIntl, InjectedIntlProps } from 'react-intl';
import {
  useAnalyticsEvents,
  UIAnalyticsEvent,
  AnalyticsContext,
} from '@atlaskit/analytics-next';
import Tooltip from '@atlaskit/tooltip';
import Button from '@atlaskit/button/custom-theme-button';
import EditorCloseIcon from '@atlaskit/icon/glyph/editor/close';

import {
  name as packageName,
  version as packageVersion,
} from '../../version.json';
import { messages } from '../../messages';

import { CloseButtonContainer } from './styled';

interface Props {
  // Function executed when the user clicks the close button
  onClick(
    event: React.MouseEvent<HTMLElement, MouseEvent>,
    analyticsEvent?: UIAnalyticsEvent,
  ): void;
}

/**
 * This function will return a CloseButton component only if the function
 * to be executed on the onClick event is passed as a parameter
 *
 * @param onClick - Function executed when the close btn is clicked
 */
export const CloseButton: React.FC<Props & InjectedIntlProps> = ({
  onClick,
  intl: { formatMessage },
}) => {
  const { createAnalyticsEvent } = useAnalyticsEvents();

  const handleOnClick = (
    event: React.MouseEvent<HTMLElement, MouseEvent>,
  ): void => {
    if (onClick) {
      const analyticsEvent: UIAnalyticsEvent = createAnalyticsEvent({
        action: 'clicked',
      });
      onClick(event, analyticsEvent);
    }
  };

  return (
    <CloseButtonContainer>
      <Tooltip
        content={formatMessage(messages.help_panel_header_close)}
        position="left"
      >
        <Button
          onClick={handleOnClick}
          appearance="subtle"
          iconBefore={
            <EditorCloseIcon
              label={formatMessage(messages.help_panel_header_close)}
              size="medium"
            />
          }
        />
      </Tooltip>
    </CloseButtonContainer>
  );
};

const CloseButtonWithContext: React.FC<Props & InjectedIntlProps> = (props) => {
  return (
    <AnalyticsContext
      data={{
        componentName: 'closeButton',
        packageName,
        packageVersion,
      }}
    >
      <CloseButton {...props} />
    </AnalyticsContext>
  );
};

export default injectIntl(CloseButtonWithContext);
PARIS, France — Outside the U.N. Climate Conference site on the outskirts of Paris, in a parking lot surrounded by hybrid buses, stands a pair of 33-foot trees one could mistake for an art installation. Their branches, white and gently sloping skyward, have the feel of PVC piping. Each tree has exactly 63 leaves and can produce 2,400 kilowatt-hours of electricity — enough to power an electric car for more than 10,000 miles annually. That’s because each leaf, twirling around a vertical axis, is a miniature wind turbine. These are les arbres à vent: wind trees. At a climate summit mired in policy debate and finger-pointing, it is easy to forget about the potential of something as simple as a tree. But there are 3 trillion trees in the world, and they are intimately tied to the climate. Alive, they sequester a massive amount of carbon. Chopped down, they release that carbon into the atmosphere. Burned, their contribution to climate change is even worse. Deforestation accounts for at least 11 percent of global greenhouse gas emissions. The Indonesian forest fires that raged this fall emitted more CO2 pollution on some days than the entire United States. In the climate conversation, trees matter. It is fitting that the renewable future envisioned by a vertical wind farm at the conference site has been molded in their likeness. Smokestacks have tended to get much more attention than trees in the climate conversation, but now, thanks to improved scientific understanding of forest carbon sequestration and better technologies for tracking deforestation, that’s beginning to change. The first days of the Paris Climate Conference have seen more than a dozen announcements by governments and companies related to protecting and regenerating forests, offering a touch of optimism amidst the hand-wringing that dominates the summit. Behind each declaration is, ostensibly, a patch of safe land. Germany, Norway, and the United Kingdom, for example, pledged a collective $5 billion between 2015 and 2020 to heavily forested countries that can link forest protection efforts to verifiable emissions reductions. Colombia, in conjunction with the aforementioned countries, announced a $300 million deforestation reduction initiative. Norway and Brazil — the latter which had long been a model of what not to do with one’s forests — announced an extension of a partnership through 2020, in which the Scandinavian country will continue to support Brazil’s efforts in curbing deforestation. Brazil has seen a 70 percent drop in Amazonian deforestation over the past decade. Last year, too, at the Lima Climate Conference, trees received a greater tip of the hat than usual. The Lima-Paris Action Agenda (LPAA) — a joint initiative of Peru and France, along with the office of the U.N. secretary-general and the U.N.’s climate change arm — is a platform for countries, states, private sector players, and NGOs to step up and showcase high-profile climate actions and coalitions in the run-up to Paris. One of the LPAA’s key themes is forests, and its forestry work was celebrated at the climate summit on Tuesday. The U.N.’s main mechanism for protecting forests — the Reducing Emissions from Deforestation and Forest Degradation program, known as REDD+ — was adopted in 2007 and is likely to be stitched into the Paris agreement over the next week. Think of it as a carbon market for forests. 
If you’re a country or a company interested in offsetting your emissions, purchase some REDD+ credits and you’ll have secured the protection of CO2-sequestering trees for a period of, say, 20 years. Assuming you can trust the host country to actually protect the forest, it’s a sweet deal for you. “Face it: That’s why the private sector is at the table,” Charles Barber told me over a cluttered lunch table. Barber is a senior manager at the World Resources Institute’s (WRI) Forests Program. He spends a lot of time thinking about forests. It’s a nice sentiment, and one that was echoed by the 42 major companies of the We Mean Business coalition that pledged this week to completely eliminate commodity-driven deforestation from their supply chains by 2020. But many forest protection advocates and indigenous activists think REDD+ and other market-focused programs are not the best way to go. One of the problems with REDD+ is that you can’t always trust host countries to protect their forests. The most densely forested nations are often plagued by illegal logging and rampant corruption. More fundamentally, most opponents of REDD+ argue that the mechanism is simply a way for rich countries and private companies to offload their responsibilities to poorer entities: Instead of actually reducing their own emissions, they buy their way to a lower carbon footprint by locking up land in a poorer country. An equitable deforestation-reduction plan would maintain a domestic, indigenous land rights focus, these activists contend. Don’t incentivize deforestation reduction with carbon offsets; just make sure trees are in the hands of communities that care for them. These criticisms are corroborated by recent evidence demonstrating that stronger indigenous land rights are consistently associated with lower carbon emissions from deforestation. In Brazil, for example, indigenous communities with legal forest rights were associated with a 1 percent rate of deforestation, compared to 7 percent in comparable areas outside such communities — equivalent to a 27-times difference in emissions attributed to deforestation. This week, researchers at Woods Hole Research Center reported that tropical indigenous forest land accounts for about 20 percent of global tropical carbon reserves. But, globally speaking, indigenous peoples lack legal rights to almost three-quarters of their land. The economic case for land rights as a solution to deforestation is often compelling, as well. One recent study, for example, showed that it costs only $1.57 per hectare annually to legally secure indigenous forest tenure, while the corresponding emission-reduction benefits are valued between $38 and $230 per hectare annually. Such links between deforestation rates and emission rates have traditionally been difficult to make, however. That’s one of the reasons why, on Tuesday, Barber and collaborators at WRI helped launch Global Forest Watch Climate, an interactive mapping initiative that rides on a hefty Google Earth backbone. The software allows users to translate deforestation rates to carbon emission benchmarks as a function of time. One of Barber’s hopes is that the initiative will help clear up the confusion as to who — governments? international bodies? third parties? — is responsible for monitoring and disseminating deforestation emissions data. Good information is unlikely to change the tide in Paris, though. 
Potential provisions for indigenous land rights in the negotiating text are sparse and tangential, suggesting the likelihood of locking in only a REDD+ mechanism in any agreement. But the distinction between REDD+ and indigenous forest rights might not have to be so stark, argues Barber. He points to an Australian savanna fire-suppression initiative in which the government allows Aboriginal Australians to generate carbon credits by practicing traditional fire management techniques. In turn, they can sell these credits to companies interested in offsetting emissions. It’s a simple wedding of a carbon market, sustainable incomes for indigenous peoples, and the preservation and transfer of indigenous traditions and knowledge. But whereas fire suppression is a service, secure land tenure is a question of justice. Whether or not the Australian model can be adapted to the deforestation arena remains an open question. Now is the time to find out: Even Brazil saw a 16 percent increase in deforestation rates this past year, a disappointing turnabout after a period of decline.
/**
 * Default implementation of {@link IPerformanceMetrics}.
 */
public final class PerformanceMetrics implements IPerformanceMetrics {

    private static final Logger LOG = LoggerFactory.getLogger(PerformanceMetrics.class);

    private double logicFps;

    /**
     * Checking CPU load can be very slow depending on the JVM used, so rate limit the calls and return a
     * cached result if called too often.
     */
    private final RateLimiter cpuLoadRateLimiter = RateLimiter.create(.2);
    private double cpuLoad;

    public PerformanceMetrics() {
    }

    @Override
    public String getPerformanceSummary() {
        List<String> lines = Lists.newArrayList();
        lines.add(StringUtil.formatRoot("FPS: %d (render)", Gdx.graphics.getFramesPerSecond()));
        if (logicFps > 0) {
            lines.add(StringUtil.formatRoot("FPS: %.2f (logic)", logicFps));
        }
        lines.add(StringUtil.formatRoot("CPU: %s", getCpuLoadText()));
        lines.add(String.format("Memory (managed): %sM", Gdx.app.getJavaHeap() >> 20));
        GdxTextureStore texStore = StaticEnvironment.TEXTURE_STORE.getIfPresent();
        if (texStore != null) {
            lines.add("Memory (textures): " + texStore.getCacheStatus());
        }
        lines.add(String.format("Memory (other): %sM", NativeMemoryTracker.get().getTotalBytes() >> 20));
        return Joiner.on('\n').join(lines);
    }

    private String getCpuLoadText() {
        double cpuLoad = getCpuLoad();
        if (cpuLoad >= 0) {
            return StringUtil.formatRoot("%03d%%", Math.round(100 * cpuLoad));
        } else {
            return "---";
        }
    }

    @Override
    public double getCpuLoad() {
        if (!cpuLoadRateLimiter.tryAcquire()) {
            return cpuLoad;
        }

        try {
            Object osBean = getMXBean("OperatingSystemMXBean");
            Method method = osBean.getClass().getMethod("getProcessCpuLoad");
            method.setAccessible(true);
            cpuLoad = ((Number)method.invoke(osBean)).doubleValue();
        } catch (Exception e) {
            LOG.info("Error obtaining CPU load (method not supported): " + e);
            cpuLoadRateLimiter.setRate(1e-3);
            cpuLoad = Double.NaN;
        }
        return cpuLoad;
    }

    /** Internal 'game logic' update rate */
    public void setLogicFps(double logicFps) {
        this.logicFps = logicFps;
    }

    private static Object getMXBean(String beanName) throws Exception {
        // java.lang.management isn't supported on Android
        @SuppressWarnings("LiteralClassName")
        Class<?> managementFactory = Class.forName("java.lang.management.ManagementFactory");
        return managementFactory.getMethod("get" + beanName).invoke(null);
    }
}
WattScale: A Data-driven Approach for Energy Efficiency Analytics of Buildings at Scale Buildings consume over 40% of the total energy in modern societies, and improving their energy efficiency can significantly reduce our energy footprint. In this paper, we present \texttt{WattScale}, a data-driven approach to identify the least energy-efficient buildings from a large population of buildings in a city or a region. Unlike previous methods such as least-squares that use point estimates, \texttt{WattScale} uses Bayesian inference to capture the stochasticity in the daily energy usage by estimating the distribution of parameters that affect a building. Further, it compares them with similar homes in a given population. \texttt{WattScale} also incorporates a fault detection algorithm to identify the underlying causes of energy inefficiency. We validate our approach using ground truth data from different geographical locations, which showcases its applicability in various settings. \texttt{WattScale} has two execution modes -- (i) individual, and (ii) region-based, which we highlight using two case studies. For the individual execution mode, we present results from a city containing>10,000 buildings and show that more than half of the buildings are inefficient in one way or another indicating a significant potential from energy improvement measures. Additionally, we provide probable cause of inefficiency and find that 41\%, 23.73\%, and 0.51\% homes have poor building envelope, heating, and cooling system faults, respectively. For the region-based execution mode, we show that \texttt{WattScale} can be extended to millions of homes in the US due to the recent availability of representative energy datasets. INTRODUCTION Buildings constitute around 40% of total energy and 70% of the overall electricity usage in the United States. Consequently, building energy-efficiency has emerged as a significant area of research in smart grids. A typical city comprises a large number of buildings of different sizes and age. In general, the building stock in many North American and European cities tend to be old-while some are recently constructed, the majority were built decades ago. Moreover, it is not uncommon for buildings to be over a hundred years old. Technological advances in building construction have yielded better-insulated envelopes as well as more energy-efficient air-conditioning, heating furnaces, and appliances, which can reduce the total energy consumption of a building. While newer buildings, as well as older ones that have undergone renovations, have adopted such efficiency measures, most are yet to benefit from such efficiency improvements. Since roughly half of a building's energy usage results from heating and cooling, opportunities abound for making efficiency improvements in cities around the world. Since a city may consist of thousands of buildings, an essential first step for implementing energy-efficiency measures is to identify those that are the least efficient and thus have the greatest need for energy-efficiency improvements. Interestingly, naive approaches such as using the age of the building or its total energy bill to identify inefficient buildings do not work well. While older buildings are usually less efficient than newer ones, the correlation is shown to be weak. Thus, age alone is not an accurate indicator of efficiency, since older buildings may have undergone renovations and energy improvements. 
Similarly, the total energy usage is not directly correlated to energy inefficiency. First, larger buildings will consume more energy than smaller ones. Even normalizing for size, greater energy usage does not necessarily point to inefficiencies. For example, a single-family home will have a higher energy demand (possibly due to the in-house washer, dryer, and water heater) compared to an identically sized apartment home. Thus, finding truly inefficient buildings requires more sophisticated methods. In this paper, we present a data-driven approach for determining the least efficient residential buildings from a large population of buildings within a city or a region using energy data in association with other external public data sources. Such buildings can then become candidates for energy efficiency measures including targeted energy incentives for improvements or upgrades. So far, lack of granular city-wide datasets prevented large-scale energy efficiency analysis of buildings. However, with increasing smart meter installations across a utilities' customer base, energy usage information of buildings is readily available. By 2016, the US had more than 70 million installed smart meters (>700M worldwide). Also, real estate information describing a building's age, size, and other characteristic are public records in many countries. Further, weather conditions can be accessed through REST APIs. Reliance on such readily available datasets make our approach broadly applicable. Given these datasets, our approach assumes it is possible to model a building's total energy usage as a sum of weather-dependent and weather-independent energy components. The weatherdependent component captures the heating and cooling energy usage, which is typically a function of the external temperature, while the weather-independent component captures the energy use from all other activities. Using this approach, we can then extract the parameter distributions that govern these energy components and identify causes of energy inefficiency by comparing them to those of other homes in a given population. For example, a model's parameter that is more sensitive to external temperature is indicative of inefficient heating or cooling. We also develop algorithms that use these comparisons to determine the probable causes of energy inefficiency. While building energy models have been extensively studied in the energy science research for many decades, and practitioners such as energy auditors routinely use them to analyze a building's energy performance, there are important differences between current approaches and our technique. First, current models employ several important parameters that are often chosen manually, based on rules of thumb. However, using manually chosen parameters may lead to incorrect analysis. On the other hand, our technique determines a custom parameter distribution of the building model, and we experimentally show its efficacy over manual approaches. Second, the current energy models are based on least-squares regression analysis that provides point estimates. In contrast, our approach provides Bayesian estimates to determine building parameter distribution that captures the stochasticity in energy use. Third, current approaches need manual intervention to varying degrees to interpret model parameters and determine likely efficiency issues. Clearly, this does not scale to thousands of buildings across a city. 
Our technique automates this process by comparing model parameters with similar homes from the population and makes it feasible to perform large-scale analysis. Thus, we go beyond determining which buildings are inefficient by also designing algorithms that determine its probable causes. In this paper, we introduce WattScale, a data-driven approach to determine the most inefficient buildings present in a city or a region. Our contributions are as follows: Bayesian Energy Modeling Approach. WattScale improves over prior work that provides point estimates by using a Bayesian inference to capture the building model parameter distributions that govern the energy usage of a building. These distributions are compared using second-order stochastic dominance to create a partial order among building parameters. Further, we propose a fault analysis algorithm that utilizes these partial orders to report probable causes of inefficiency. Open-source tool with Dual Execution Modes. We implement WattScale approach as an open source tool that enables determining inefficient buildings at scale. Our tool offers two execution modes -(i) individual, and (ii) region-based. In the individual execution mode, we flag inefficient homes by comparing their building model parameter distributions with other similar homes in a city. Whereas in the region-based execution mode, we compare the building model parameter distributions of the candidate home with those learned for the entire population of similar homes in a given region with similar weather conditions. Model Validation and Analysis. We evaluate WattScale using energy data from three different cities in geographically diverse regions of the US. In particular, we show that our approach can disaggregate the buildings' energy usage into different components with high accuracy and tighter bounds on the model parameters -an improvement over the two popular baselines. Further, our approach identifies buildings that have possible energy inefficiencies. In comparison to manual audit reports, our approach correctly identified faults in nearly 95% of the cases. Real-world case study analysis and wide applicability. We examine our approach using two different case studies showcasing the efficacy of the two execution modes of WattScale. In the first case study, we used the individual execution mode as we had energy usage from smart meters deployed in 10,107 residential buildings in a city through a local utility. WattScale reported more than half of the buildings in our dataset as inefficient, which indicates a significant scope for making energy improvements in several cities. Further, our results indicate poor building envelope as a major cause of inefficiency, which accounts for around 41% of all homes. Heating and cooling system faults comprises 23.73%, and 0.51% of all homes respectively. In another case study, we used region-based execution mode on a smaller dataset of residential buildings from the city of Boulder. Here, we showed that region-based mode can help detect faults in millions of residential buildings in the US and around the world, if a representative energy dataset if available. Thus, using the region-based mode, the individual homeowners can proactively learn about the energy efficiency of their homes without the intervention from their local utility. BACKGROUND In this section, we present background on energy efficiency in buildings and techniques used to model a building's energy usage. 
Energy Efficiency in Buildings Energy usage in residential buildings has different sources, such as heating and cooling, lighting, and household appliances. There can be many causes of inefficiency in each of these components, such as the use of inefficient incandescent lighting and the use of inefficient (e.g., non-Energy Star) appliances. Studies have shown that heating and cooling account for the dominant portion of a building's energy usage, comprising over half of the total usage, and it follows that the most significant cause of inefficiency lies in problems with heating and cooling. Two factors determine the heating and cooling efficiency of a building: the insulation of the building's external walls and roof ("building envelope") and their ability to minimize thermal leakage, and the efficiency of the heating and cooling equipment. Recent technology improvements have seen advancements on both fronts. New buildings are constructed using modern methods and better construction materials that yield a building envelope that minimizes air leaks and thermal loss through better-insulated walls and roofs and high-efficiency windows and doors. Similarly, new high-efficiency heating and AC equipment are typically 20-30% more efficient than equipment installed in the late 1990s and early 2000s. Unfortunately, older residential buildings, and even ones built two decades ago, do not incorporate such energy efficient features. Further, the building envelope can deteriorate over time due to age and weather, and so can mechanical HVAC equipment. Consequently, an analysis of a building's heating and cooling energy use can point to the leading causes of a building's energy inefficiency. Inferring a Building Energy Model One approach to modeling a building's heating and cooling usage is to model its dependence on weather. For example, a building's heating and cooling usage can be modeled as a linear function of external temperature. To intuitively understand why, consider cooling energy usage during the summer. The higher the outside temperature on hot summer days, the higher the AC energy usage. Since the difference between outside and inside temperatures is large, there is more thermal gain, which requires a longer duration of cooling to maintain a set indoor temperature. Thus, there is a linear relationship between heating/cooling energy use and outside temperature (see Figure 1(a) and (b)). Given the linear dependence, linear models are commonly used within energy science research to capture the relationship between energy use and outside temperature. However, most of the prior approaches do not consider uncertainties that are associated with indicators of building performance. Primarily, these models do not capture the stochastic variations in heating and cooling, as well as in the weather-independent energy usage, resulting from day-to-day variations in human activities inside a home. As seen in Figure 1, such energy variations exist, and our approach uses Bayesian inference to determine the distributions of the building parameters that model these uncertainties in energy use. Problem Formulation Consider a large population of buildings in a city. We assume that a trace of the total daily energy usage is available for each building. We also assume that building characteristics, such as age, size, and type (Single Family, Apartment, etc.), are available for each building, along with the daily outdoor temperature data.
Let B be the set of all residential buildings containing information on building characteristics in a city. Further, b_i ∈ B denotes the i-th residential building, defined by a tuple of its daily energy usage trace and its building characteristics. WATTSCALE: OUR APPROACH In this section, we describe the details of our data-driven approach. WattScale's approach is depicted in Figure 2 and involves three key steps: (i) learn a building energy model for a home or a region from energy usage data, (ii) create a partial order of buildings using the parameter distributions from the building model, and finally (iii) detect building faults causing energy inefficiency for a home. Below, we discuss each step in detail. Building Energy Model We first provide the intuition behind our approach. Heating and cooling costs for a building can be understood using elementary thermodynamics. Typically, in colder months, the outside ambient temperature is colder than the inside building temperature, resulting in a net thermal loss where the inside heat flows outside through the building envelope, causing the inside temperature to drop. In warmer months, the opposite is true. The building experiences a net heat gain where the heat flows inside, causing the building temperature to rise. It follows that every home has a specific temperature T_b at which there is neither thermal loss nor thermal gain, i.e., thermodynamic equilibrium. When the outside temperature is above T_b, there is a need for AC to cool the home. Conversely, when the temperature is below T_b, there is a need for a heater to heat the home. This temperature T_b is called the balance point temperature of the building. The rate of thermal loss or thermal gain depends on the degree of insulation, the airtightness of the building envelope, and the surface area exposed to outside elements. The better the insulation and airtightness, the smaller the rate of loss or gain for a given temperature differential relative to T_b. The difference between the outside temperature and the balance point temperature T_b is also referred to as the degree-days, an indication of how many degrees warmer or colder the outside weather is relative to the building's balance point. Based on this intuition, we now describe our building energy model. Any energy load in a building can be classified as weather independent or weather dependent. A weather-independent load is one where the energy consumed by the device is uncorrelated with the outside temperature; consumption from loads such as lighting, electronic devices, and household appliances depends on human activity rather than outside weather. Heating and cooling equipment constitute weather-dependent loads, as their consumption is linearly dependent on the outside temperature relative to the balance point. If we assume that weather-independent loads are distributed around a constant value (also called the base load), then the total energy consumed is the sum of the base load and the weather-dependent loads (heating and cooling loads), defined as E^total_d = E^base + E^heat_d + E^cool_d, (1) where E^total_d denotes the total energy used by a building on day d ∈ D, E^heat_d and E^cool_d denote the energy used for heating and cooling, respectively, on day d, and E^base denotes the energy usage of base load appliances. Thus, given a series of observations of the total energy usage and the outside ambient temperature, it is possible to fit a regression and learn the fixed weather-independent component (base load) and the temperature-dependent component (heating and cooling). This forms the basis for inferring our weather-aware building energy model.
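To make the decomposition in (1) concrete, here is a minimal sketch (our illustration, not code released with the paper) of the prediction side of the model; the slope and balance-point parameters anticipate the formal definitions in (2)-(4) below, and all names are placeholders.

import numpy as np

def predicted_daily_energy(t_out, e_base, alpha_heat, alpha_cool, t_heat, t_cool):
    """Degree-day style prediction of daily energy use: a constant base load plus
    heating/cooling terms that switch on below/above the balance-point temperatures."""
    t_out = np.asarray(t_out, dtype=float)
    heating = alpha_heat * np.maximum(t_heat - t_out, 0.0)  # (T_heat - T_d)^+
    cooling = alpha_cool * np.maximum(t_out - t_cool, 0.0)  # (T_d - T_cool)^+
    return e_base + heating + cooling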
Figure 3 illustrates the relationship between outdoor temperature and the energy consumption of a building. The individual data points represent the daily energy usage (along the Y-axis) for a given average outdoor temperature (along the X-axis) of a building. The figure shows that the building has two balance point temperatures: a heating balance point temperature T_heat, below which heating units are turned on, and a cooling balance point temperature T_cool, above which air-conditioning is turned on. Further, the figure also shows a piecewise linear fit over the daily energy usage. When the outdoor temperature is between the two balance points, the building consumes energy that is distributed around a constant value, the base load E_base. The weather dependent components, i.e., the heating E_d^heat and cooling E_d^cool energy consumption, are a function of the ambient outdoor temperature T_d and are defined as:
E_d^heat = β_heat · (T_heat − T_d)^+    (2)
E_d^cool = β_cool · (T_d − T_cool)^+    (3)
where β_heat and β_cool are the heating and cooling slopes in the above linear equations and represent positive constant factors indicating the sensitivity of the building to temperature changes, and (·)^+ indicates that the value is zero if negative, ensuring that only energy from heating or from cooling is considered on a given day. Using (2) and (3), the energy model in (1) can be represented as a piecewise linear model:
E_d^total = E_base + β_heat · (T_heat − T_d)^+ + β_cool · (T_d − T_cool)^+    (4)
The model in (4) is known as the degree-day model and forms our base energy model for estimating the building parameters. A more in-depth explanation is presented in ASHRAE Guideline 14, which refers to it as the five-parameter change-point model. Note that the above model works when data for at least a year is available. However, a truncated version of the model can be employed when only heating (cooling) data is available for the winter (summer) months.
3.1.1 Bayesian Inference Parameter Estimation of a Building. While methods like Maximum Likelihood Estimation (MLE) or Maximum A Posteriori (MAP) estimation can be used to determine the building parameters, they provide point estimates that can hide relevant information (such as not capturing the uncertainties in human energy usage). To capture human variations, we require the probability density functions of the parameters. Thus, we use a Bayesian inference approach, which provides the posterior distribution of the parameters. We model (4) using a Bayesian approach and assume the error process to be normally distributed, N(0, σ²). Thus, the daily energy consumption E_d^total is normally distributed with mean μ_d and variance σ², where μ_d is equal to the right-hand side of (4). Note that the energy consumption E_d^total is known, and so is the independent variable, i.e., the ambient temperature T_d. However, the building parameters (β_heat, β_cool, T_heat, T_cool, and E_base) are unknown. Using Bayesian inference, we can then compute a posterior distribution for each of these parameters that best explains the evidence (i.e., the known values of E_d^total and T_d ∀d ∈ D), starting from an initially assumed prior distribution. To determine the posterior distribution of the individual parameters, we use the Markov chain Monte Carlo (MCMC) method, which generates samples from the posterior distribution by forming a reversible Markov chain with the same equilibrium distribution. We introduce a prior distribution that represents the initial belief regarding the building parameters. For example, the two balance point temperatures will lie within a wide range of 32°F to 100°F. This belief can be represented using a uniform prior over that range.
Similarly, the base load, heating slope, and cooling slope can be drawn from weakly informative Gaussian priors restricted to non-negative values. This is because the base load, a quantity of energy, cannot be negative. Similarly, the slope values must be positive, as they represent an increase in energy per unit temperature. The parameters of the Gaussian priors are scaled to our setting and selected based on the recommendations provided by Gelman et al. To simplify our building model, we assume that the parameters are independent, i.e., the heating, cooling, and base load parameters do not affect one another. Several MCMC methods leverage different strategies to move from these priors towards the target posterior distribution. We employ the No-U-Turn Sampler, a sophisticated MCMC method, which has been shown to converge quickly towards the target distribution. Thus, after discarding an initial set of burn-in samples, we can draw samples approximating the true posterior distribution. From these post-burn-in samples, a posterior distribution for each individual building parameter can be formed. Our complete Bayesian model is defined in Table 1. Table 1. Bayesian formulation of our building energy model.
Since buildings are of different sizes, simply comparing the parameters in absolute terms is not meaningful. To enable such comparison, we normalize the energy usage by building size before the Bayesian inference. Hence, in our case, E_base represents base load energy use per unit area. Similarly, the heating slope β_heat and cooling slope β_cool give the change in energy per degree temperature per unit area. The balance point parameters (T_heat and T_cool) are not normalized, as they are unaffected by the size of the house. We construct a cumulative distribution function (F_β_heat, F_β_cool, F_E_base) for each of the building model parameters (β_heat, β_cool, E_base) from their respective posterior densities obtained after inference. For the balance point parameters (T_heat and T_cool), we only use their mean values, as they tend to remain fixed for a given building irrespective of human variation.
3.1.2 Building Parameter Estimation of a Region. The building energy model in (4) can also be used to estimate the building parameters for a region. Estimating the distribution of building parameters of a region allows efficient comparison of a building to a general population. Here, we describe how we create the building energy model for a given region. Since the above model uses daily energy usage for each home, estimating the parameter distribution for an entire population may be inefficient and time-consuming. Further, such fine-grained daily energy usage for all homes in a region may not be available. Instead, we use annual consumption information to estimate the population's building parameters. To do so, we modify our energy model as follows. Similar to (2) and (3), the weather dependent components of a building can be defined as:
E_h^heat = β_heat · Σ_{d∈D} (T_heat − T_d)^+,   E_h^cool = β_cool · Σ_{d∈D} (T_d − T_cool)^+    (5)
where H is the set of homes in a region and E_h^heat and E_h^cool are the annual heating and cooling consumption of home h ∈ H. Further, the energy model of a home h can be represented as:
E_h^total = |D| · E_base + E_h^heat + E_h^cool    (6)
where E_h^total is the total annual energy consumption of home h. This forms the base energy model for estimating the building parameters of a home in a region. Next, we construct a cumulative distribution function (F_β_heat, F_β_cool, F_E_base) for each of the building model parameters (β_heat, β_cool, E_base) of the input region using Kernel Density Estimation, a popular nonparametric approach for estimating the density of a random variable.
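The following is a minimal sketch of the per-building Bayesian model using PyMC; the paper's implementation uses PyStan, so the library choice, prior scales, and variable names here are assumptions. It mirrors the degree-day likelihood and priors described above and shows how posterior samples yield per-parameter CDFs; for a region, an analogous CDF can be built from per-home estimates with scipy.stats.gaussian_kde.

```python
import numpy as np
import pymc as pm  # assumption: PyMC used here in place of the paper's PyStan

def fit_building_model(temp_f: np.ndarray, kwh_per_sqft: np.ndarray):
    """Posterior inference for the five-parameter change-point model."""
    with pm.Model():
        # Wide uniform priors on balance points; positive, weakly informative
        # priors on base load, slopes, and noise (scales are assumptions).
        t_heat = pm.Uniform("T_heat", lower=32.0, upper=100.0)
        t_cool = pm.Uniform("T_cool", lower=32.0, upper=100.0)
        e_base = pm.HalfNormal("E_base", sigma=1.0)
        b_heat = pm.HalfNormal("beta_heat", sigma=0.1)
        b_cool = pm.HalfNormal("beta_cool", sigma=0.1)
        sigma = pm.HalfNormal("sigma", sigma=1.0)

        # Degree-day mean: base load plus heating and cooling terms.
        mu = (e_base
              + b_heat * pm.math.maximum(t_heat - temp_f, 0.0)
              + b_cool * pm.math.maximum(temp_f - t_cool, 0.0))
        pm.Normal("E_total", mu=mu, sigma=sigma, observed=kwh_per_sqft)

        # NUTS sampling; the tuning draws act as burn-in.
        trace = pm.sample(draws=2000, tune=1000, chains=2)
    return trace

# Posterior samples give an empirical CDF for, e.g., the heating slope:
# samples = trace.posterior["beta_heat"].values.ravel()
# F_beta_heat = lambda x: np.mean(samples <= x)
```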
Later, we will show how we use this parameter distribution of a region to identify an inefficient home.
Partial Order Creation
Rather than relying on rule-of-thumb measures to interpret model parameters, which change with geography and many other building characteristics, we propose comparing them with those of similar homes from a given population. Given the above model, we create a partial order of buildings as follows. We first create peer groups using the building's physical attributes (e.g., age of the building, building type, etc.). Next, within each peer group, we create a partial order of the buildings for each building parameter distribution. Below, we describe each step in detail.
3.2.1 Peer group creation. A naive approach of comparing the model parameters of any two homes has several shortcomings. First, building parameters may vary based on the building type. As an example, consider the energy use of a studio apartment and a three-bedroom apartment. The two building types have completely different energy needs in terms of heating/cooling loads, and their rates of heat gain (or loss) would differ. Hence, a building model's heating/cooling parameters from two different building types would differ and thus should not be compared in the same cohort. Second, even for the same building type, the model parameters from two buildings built in different years may belong to two different families of distributions, and the comparison may thus be unfair. As an example, assume two houses are equal in all aspects (building characteristics, occupancy patterns, etc.) except the year built. Due to advances in building technology and energy efficiency standards, a newer home will have a building envelope made of more energy-efficient material than a comparatively older home. While the newer home may be energy efficient compared to older homes, it may still be energy inefficient compared to a cohort of homes built around the same year. Thus, it would be unreasonable to compare the building model parameters of homes with a sizeable age difference, as outlier detection techniques will always mark older homes as inefficient. To overcome these limitations, WattScale allows the creation of peer groups so that comparisons to determine inefficient homes are made within a cohort. To enable a meaningful comparison, we compare the building model parameters only within their cohort. We use three building attributes for peer group creation, namely: (i) property class (e.g., single family, apartment, etc.), (ii) built area (e.g., 2,000 to 3,000 sq. ft.), and (iii) year built (e.g., 1945 to 1965). For instance, buildings constructed in different years adhere to different energy regulations and standards, and thus it is not meaningful to compare them. Similarly, different building types and age groups have different characteristics, and it would be unreasonable to compare them directly. Hence, our approach allows the creation of peer groups to enable comparison within a cohort to determine inefficient homes.
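A minimal sketch of how such peer groups might be formed with pandas; the attribute names and bin edges are illustrative assumptions, not the exact cohort definitions used by WattScale.

```python
import pandas as pd

# Hypothetical building metadata: one row per home.
buildings = pd.DataFrame({
    "building_id": [101, 102, 103],
    "property_class": ["Single Family", "Apartment", "Single Family"],
    "built_area_sqft": [1450, 800, 2600],
    "year_built": [1938, 1972, 2004],
})

# Bin the continuous attributes so each cohort shares a property class,
# a size range, and a construction era.
buildings["area_bin"] = pd.cut(
    buildings["built_area_sqft"], bins=[0, 1000, 2000, 3000, 10_000])
buildings["era_bin"] = pd.cut(
    buildings["year_built"], bins=[1900, 1945, 1965, 1985, 2005, 2025])

peer_groups = buildings.groupby(["property_class", "area_bin", "era_bin"], observed=True)
# Each group below is one cohort within which parameter distributions are compared.
for key, cohort in peer_groups:
    print(key, list(cohort["building_id"]))
```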
3.2.2 Stochastic order of building parameters. Since the building model parameters are probabilistic distributions, we cannot simply compare these uncertain quantities and create a total ordering. Statistics such as the mean, median, or mode provide a single number to capture the behavior of the whole distribution. While these point estimates can be used to compare two distributions, they typically hide useful information regarding their shape and may not account for any heavy-tailed behavior present in a building parameter distribution. Hence, we use second order stochastic dominance, a well-known concept in decision theory for comparing two distributions, to create a partial order of the building parameters within a peer group. The main idea behind second order stochastic dominance is that, for a given building model parameter p, distribution F_p dominates G_p, i.e., F_p ⪰₂ G_p, if the area enclosed between the F_p and G_p distributions is non-negative up to every point x:
∫_{−∞}^{x} [G_p(t) − F_p(t)] dt ≥ 0   for all x.
In Figure 4, the area shaded in green shows the region where F_p dominates G_p, and the red region shows where G_p dominates F_p. In Figure 4(a), we observe that F_p does not dominate G_p, since there is no green area greater than or equal to the red area located to its left. In contrast, Figures 4(b) and (c) show that F_p dominates G_p, because for every red area there exists a larger green area located to its left. To intuitively understand the implications of stochastic dominance in our scenario, consider two distributions F_p and G_p of a building parameter p from two separate buildings, A and B, respectively. As noted earlier, the building parameters influence energy usage, such that higher parameter values imply higher energy usage, and vice versa. Assume that building A's normalized energy usage is greater than building B's normalized energy usage, such that distribution F_p dominates G_p, i.e., F_p ⪰₂ G_p. Clearly, the building parameter distribution F_p for building A will lie to the right of distribution G_p, as A has higher energy usage. In fact, since F_p ⪰₂ G_p, by definition, the distribution F_p will lie to the right of G_p over a majority of the region. However, homes may have similar building parameter distributions, i.e., distributions with similar shape and tendency. In such cases, it is possible that neither home dominates the other. Stochastic dominance thus enables interpretation of the building parameter distributions with respect to one another, with higher energy usage buildings tending to lie on the right side of the population. This allows separation of homes with dominant distributions from non-dominant ones.
Dual execution modes. Our WattScale approach can be used in two execution modes: (i) individual and (ii) region-based. In the individual mode, we run a pairwise comparison of all buildings within a cohort for each building model parameter p. This gives us the partial order for all pairs and parameters, which we use to detect inefficient homes. In the region-based mode, we compare each building model parameter to the region's parameter distribution. Using the stochastic order criterion, it is simple to compare two distributions and identify the dominant one. However, there may be cases where there are not sufficient buildings to create a region-wide building parameter distribution, because energy consumption data for a given cohort may be sparse. A small city or region may not have enough buildings to create a parameter distribution for that region and cohort. To handle such cases, one approach is to use candidate buildings from nearby regions to create a region-wide parameter distribution for comparison.
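A small sketch of the second-order stochastic dominance test described above, operating on posterior samples of a single parameter from two buildings. The empirical-CDF construction and the grid resolution are implementation assumptions.

```python
import numpy as np

def dominates_ssd(samples_f: np.ndarray, samples_g: np.ndarray, n_grid: int = 200) -> bool:
    """Return True if F (from samples_f) second-order stochastically dominates
    G (from samples_g): the cumulative area between the empirical CDFs,
    the integral of (G - F), stays non-negative up to every point."""
    grid = np.linspace(min(samples_f.min(), samples_g.min()),
                       max(samples_f.max(), samples_g.max()), n_grid)
    cdf_f = np.searchsorted(np.sort(samples_f), grid, side="right") / len(samples_f)
    cdf_g = np.searchsorted(np.sort(samples_g), grid, side="right") / len(samples_g)
    diff = cdf_g - cdf_f                        # pointwise G(t) - F(t)
    cum_area = np.cumsum(diff) * (grid[1] - grid[0])
    return bool(np.all(cum_area >= -1e-9))      # small tolerance for numerics

# Example: building A's heating-slope samples sit to the right of building B's,
# so A's distribution dominates (higher normalized usage).
rng = np.random.default_rng(0)
a = rng.normal(2.0, 0.3, size=5000)
b = rng.normal(1.5, 0.3, size=5000)
print(dominates_ssd(a, b), dominates_ssd(b, a))  # expected: True False
```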
In our approach, we use an R-tree based data structure to access buildings within a region. R-tree data structures provide efficient access to spatial objects, especially geographical coordinates. The key idea is that the data structure groups nearby homes and represents them by their minimum bounding rectangle. At the leaf level, each rectangle can be represented as a tree, and subsequent aggregation at higher levels combines nearby objects, providing a coarse approximation of the data. Thus, it provides fast and efficient access to a group of homes for any region within the bounding rectangle, and the search area can be increased as needed. In our approach, the search space is increased if we do not find sufficient homes, meeting the specified filter criteria, to create a building parameter distribution for a region. For instance, the R-tree can be used to retrieve all homes within a region that were built within a specific range of years and are of a particular property type (e.g., single family homes). If there are not enough homes that meet the criteria, we include buildings from nearby regions whose climate conditions are similar. After sufficient buildings are found, we use them to create the parameter distribution of the region for that peer group.
Fault Detection and Analysis
We first discuss the causes of inefficiency associated with the different model parameters. Later, we present our algorithm that identifies inefficient homes and their potential causes.
3.3.1 Parameter relationship with building faults. The heating slope β_heat and the heating balance point temperature T_heat are the two parameters that enable our model to interpret the heating inefficiencies of a home. Buildings with a high β_heat lose heat at a higher rate, which in turn increases heating unit usage (i.e., more power is consumed) to compensate for the high loss rate. A high energy loss rate can be attributed to poor building insulation, air leakage, or an inefficient heating unit. Separately, the heating balance point temperature also indicates inefficiencies in the heating component of a home. A high balance point temperature suggests two possible inefficiencies: (i) a high thermostat set-point temperature and (ii) poor building insulation. If the set-point temperature is high during winter, heating units turn on more frequently to maintain the indoor temperature at the set point. In contrast, if the building insulation is poor, more heat is lost through the building envelope. Thus, heating units will be turned on frequently to sustain the high heating balance point temperature. Similarly, we can interpret the cooling slope β_cool and the cooling balance point temperature T_cool, which point to inefficiencies in the cooling units or the building envelope. Homes with a high E_base indicate high appliance usage or inefficient appliances. In such homes, energy retrofits may not help reduce energy consumption. However, these homes may benefit from replacing old appliances (water heater, dryer) with newer Energy Star rated ones. We summarize the association between the model parameters and the probable causes of building faults in Table 2.
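A sketch of how the parameter-to-fault association summarized in Table 2 can be encoded; the exact fault labels are paraphrased from the discussion above rather than copied from the paper's table.

```python
# Hypothetical encoding of Table 2: which faults a deviating parameter suggests.
PARAMETER_FAULTS = {
    "beta_heat": ["poor building insulation", "air leakage", "inefficient heating unit"],
    "beta_cool": ["poor building insulation", "air leakage", "inefficient cooling unit"],
    "T_heat":    ["high heating set-point temperature", "poor building insulation"],
    "T_cool":    ["low cooling set-point temperature", "poor building insulation"],
    "E_base":    ["inefficient or excessive appliance usage"],
}

def likely_faults(flagged_parameters):
    """Map the parameters flagged as anomalous for a home to probable faults."""
    faults = []
    for p in flagged_parameters:
        faults.extend(PARAMETER_FAULTS.get(p, []))
    return sorted(set(faults))

# Example: a home flagged for a high heating slope and a high base load.
print(likely_faults(["beta_heat", "E_base"]))
```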
3.3.2 Inefficient Home Analysis Algorithm. We present the pseudo-code to determine inefficient buildings in Algorithm 1. Depending on the execution mode, we can use our algorithm to find inefficient homes within a cohort or to identify whether a candidate home is inefficient. Below, we outline both scenarios.
Identify Inefficient Homes within a Cohort: In this scenario, we identify the homes that are inefficient within a cohort. To do so, we first use the partially ordered set of buildings to determine the outliers for each parameter. To determine outliers, note that the energy usage of an inefficient home is high. Thus, the building parameter distribution of an inefficient home will tend to be stochastically dominant with respect to the others in its peer group. However, among inefficient homes, the building parameter distributions may be similar, and thus may not be stochastically dominant to one another. Similarly, within energy efficient homes this distinction of dominance may not be apparent, as their distributions may be near-identical to one another. We use this insight to define a building as inefficient in a given model parameter if it is stochastically dominant over a majority of the homes within its cohort. For instance, if a building's heating parameter distribution F_β_heat is dominant over more than τ% of the buildings, we conclude that the building is inefficient and has a high heating slope. Here, τ is the sensitivity threshold of WattScale and provides the flexibility to control the number of inefficient homes flagged: the lower the threshold value, the more homes are identified as inefficient. For all experiments, we chose this threshold to be 75%. Thus, for each parameter, we determine a building to be inefficient if its distribution is dominant beyond the threshold. We use a balance point threshold to determine buildings with an abnormal balance point temperature. We flag a building as inefficient if the mean value obtained after inference for the heating (or cooling) balance point temperature T_heat (or T_cool) is greater than (less than) a specific heating (or cooling) balance point threshold of 70°F (55°F), a common choice employed by expert auditors. However, these values can also be provided as parameters to the algorithm. The pseudo-code to determine inefficient homes within a cohort is presented in Algorithm 1.
Identify Inefficient Home within a Region: To identify whether a candidate building is inefficient, we use its location information to first create a cohort for comparison. The difference from the previous scenario is that, here, the cohort is not provided in advance. We create the cohort based on the region and the attributes of the candidate building. Further, unlike the previous scenario, where the task is to identify all inefficient homes within a cohort and therefore requires an all-pairs comparison within the cohort, in this case we only have to compare the candidate building against the region's building parameter distribution to examine whether it is inefficient. This approach is also illustrated in Algorithm 1. Our approach finds all candidate buildings in a location L that meet the criteria specified in the attribute set A. Depending on the size of the cohort, our approach expands the search over a region until sufficient buildings are identified to create the cohort. Once the peer cohort is created, we create the building parameter distributions of the cohort, namely the heating slope β_heat, the cooling slope β_cool, and the base load E_base of the region. These distributions are then compared against the candidate building to identify any inefficiencies. For instance, if the candidate building's cooling slope is stochastically dominant compared to the region-wide cooling slope parameter, we flag it as inefficient.
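A sketch of the cohort-mode detection rule described above; it reuses the dominates_ssd check from the earlier sketch, and the data layout (a dict of posterior samples per home) and threshold defaults are assumptions rather than the paper's Algorithm 1 verbatim.

```python
# Assumed layout: cohort[home_id][param] -> 1-D array of posterior samples,
# plus per-home mean balance points. dominates_ssd() is the check sketched earlier.
def flag_inefficient(cohort: dict, tau: float = 0.75,
                     t_heat_max: float = 70.0, t_cool_min: float = 55.0):
    """Return {home_id: [flagged parameters]} for one peer group."""
    flags = {h: [] for h in cohort}
    for param in ("beta_heat", "beta_cool", "E_base"):
        for h, data in cohort.items():
            peers = [p for p in cohort if p != h]
            dominated = sum(
                dominates_ssd(data[param], cohort[p][param]) for p in peers)
            if peers and dominated / len(peers) > tau:
                flags[h].append(param)          # dominant over > tau of the cohort
    for h, data in cohort.items():
        if data["T_heat_mean"] > t_heat_max:    # unusually high heating balance point
            flags[h].append("T_heat")
        if data["T_cool_mean"] < t_cool_min:    # unusually low cooling balance point
            flags[h].append("T_cool")
    return flags
```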
Root Cause Analysis. As noted earlier, each parameter in the building model affects an energy component defined in (1). Any irregularity in a building parameter, in comparison to its peer group or the region, points to a possible inefficiency in the corresponding energy component. We outline our pseudo-code for finding the root cause in Algorithm 2. First, we create a mapping from indicators of deviation in the building model parameters to possible faults using Table 2. We provide this mapping as an input to our algorithm. Next, we associate a fault with a home if it was flagged inefficient for the given parameter p. For instance, if a home is flagged as having a high base load, we say that the home has inefficient appliances. Similarly, an inefficient home with a high heating slope is assigned faults related to heating inefficiencies. We then generate a report of the list of potential faults in a given home.
IMPLEMENTATION
WattScale is split into two components: (i) a Unix-like command line tool that uses PyStan, a statistical modeling library, to implement our Bayesian model, and (ii) a web-based application interface implemented using the Django framework for interacting with the command line tool. Users can interact with either component to determine likely reasons for the inefficiency of an individual building or a group of buildings. To determine the inefficiencies of a single building (i.e., the region-based execution mode), we provide users an interface to upload their energy usage information in a Green Button friendly format. The Green Button initiative provides energy consumers access to their energy consumption data collected from their smart meters. Since many utility companies widely support the Green Button format, this enables our service to be used by millions of consumers in the US. When users upload their Green Button data, along with building information (such as zip code, year built, etc.), WattScale creates a custom Bayesian model of the home using the local weather data and the details provided by the user. The weather data of a nearby airport is used as a proxy for local weather conditions, and WattScale periodically fetches and updates this data from public APIs. Further, we use the location data to create a cohort group that matches the attributes of the building provided. For instance, if the user's building is a single family home that was built in the year 1940, our algorithm uses this information to create a peer cohort with similar features, that is, single family homes built around 1940 under similar climate conditions, to enable a fair comparison. We expand our search space, using the R-tree based data structure, to identify additional homes if there are not sufficient homes in a given region that match the filter criteria. Next, we create a building parameter distribution of the cohort and compare it with the candidate home provided by the user to determine inefficiencies. We then highlight the likely faults in the home.
WattScale can also identify inefficient homes within a group of buildings (i.e., the individual execution mode). This mode is useful for utility companies that have access to energy data from several homes in a region and want to identify a set of homes that are energy inefficient. In this mode, a user uploads the energy information and building attributes for a group of buildings. Here, we assume that the weather conditions are similar for all the input buildings.
Table 3. Key characteristics of the Dataport and New England-based utility smart meter datasets (Austin, TX; a city in New England; Boulder, CO).
Once again, we use the location of the buildings to retrieve the local weather data and build a custom Bayesian model of each home. We also use the building attributes to create custom peer groups within the set of buildings. Next, users provide a sensitivity threshold that is used to create a partially ordered set of inefficient homes. As utility companies may have a limited audit budget to manually inspect homes, the threshold gives a user the flexibility to control the list of least efficient homes. Figure 5(a) shows how users can adjust the sensitivity parameter to obtain inefficient homes. Finally, WattScale generates a report listing the inefficient homes and their likely faults. Figure 5(b) shows the inefficiency report for a single home, listing its likely faults.
EXPERIMENTAL VALIDATION
We first validate our model estimates against ground truth data from three cities and evaluate their efficacy. For each of these datasets, we convert the heating fuel usage to its kWh equivalent.
Dataset Description
5.1.1 Dataset 1: Dataport (Austin, Texas). Our first dataset contains energy consumption information from homes located in Austin, Texas, from the Dataport Research Program. The dataset contains an energy breakdown at the appliance level, which serves as ground truth to understand how our approach disaggregates energy components. We select a subset of homes (163 in total) from this dataset that have HVAC and baseload appliance information along with the total energy usage. Since most homes enrolled in the Dataport research program belong to energy-conscious homeowners and are energy efficient, we use this dataset only for validating our energy disaggregation process.
5.1.2 Dataset 2: Utility smart meter data (New England). This dataset contains smart meter data for 10,107 homes from a small city in the New England region of the United States. The dataset has energy usage (in kWh) from both electricity and gas meters. Each home may have more than one smart meter, such as a meter to report gas usage and another to report electricity usage. For homes with multiple meters (gas and electric), we combine their energy usage to determine the building's daily energy consumption for an entire year. Apart from energy usage, the dataset also contains real estate information that includes the building's size, the number of rooms and bedrooms, and the property type (single family, apartment, etc.). We also have manual audit reports for some of the homes, which we use as ground truth data for validating our approach. Further, we have weather information for the city containing the average daily outdoor temperature.
5.1.3 Dataset 3: Dataport (Boulder, Colorado). Our third dataset contains energy consumption information from homes in Boulder, Colorado, also from the Dataport Research Program. We select a subset of homes (32 in total) from this dataset that have several appliances along with the total energy usage information for a period of one year. We use this dataset to validate the performance of WattScale in identifying inefficient homes using the distribution of building model parameters for a region. We summarize the characteristics of all three datasets in Table 3.
Energy Split Validation
We now validate the efficacy of our model in disaggregating the overall energy usage into distinct energy components, i.e., heating, cooling, and baseload. For this experiment, we restrict our analysis to the 163 homes from the Dataport (Austin) dataset. We compare our technique with two baseline techniques (LS 65F and LS Range), commonly used in prior work, which use the degree-days model to provide point estimates of the individual building model parameters.
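As a rough illustration of what such a least-squares point-estimate fit looks like, before the two baselines are described in detail, the sketch below fits the five-parameter change-point model with scipy.optimize.curve_fit. The starting values and bounds are assumptions; fixing both balance points at 65°F would reduce it to the LS 65F style of fit.

```python
import numpy as np
from scipy.optimize import curve_fit

def change_point_model(temp_f, e_base, beta_heat, beta_cool, t_heat, t_cool):
    """Five-parameter degree-day model (vectorized over daily temperatures)."""
    return (e_base
            + beta_heat * np.maximum(t_heat - temp_f, 0.0)
            + beta_cool * np.maximum(temp_f - t_cool, 0.0))

def fit_ls_range(temp_f, kwh):
    """Least-squares point estimates of all five parameters (LS Range style)."""
    p0 = [np.median(kwh), 0.5, 0.5, 55.0, 70.0]          # assumed starting guess
    bounds = ([0, 0, 0, 32, 32], [np.inf, np.inf, np.inf, 100, 100])
    params, cov = curve_fit(change_point_model, temp_f, kwh, p0=p0, bounds=bounds)
    return params, np.sqrt(np.diag(cov))                 # estimates and std. errors
```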
Our first baseline technique, LS 65F, estimates the three building energy parameters (β_heat, β_cool, E_base) using a least-squares fit and assumes the balance point temperature to be constant (65°F). This is a widely used approach among energy practitioners in the US and is recommended by official bodies such as ASHRAE. Our second baseline technique, LS Range, estimates all five building energy parameters (β_heat, β_cool, T_heat, T_cool, and E_base) using a least-squares fit. Unlike the baseline approaches, WattScale estimates the parameter distributions; thus, for comparison, we use the mean of the posterior distribution of the parameters to obtain the fixed proportions of the energy splits. Figure 6 shows the distribution of the percentage difference in energy usage with respect to the ground truth for each energy component. While LS Range and WattScale have a median error of ≈ -1.6%, LS 65F has a median error of 10% for baseload energy. Unlike LS 65F, LS Range and WattScale do not assume a constant balance point temperature and thus have lower error. Figure 7 compares the standard deviations of the building parameters from the two approaches. In WattScale, the standard deviations are obtained from the parameter posterior distributions, whereas in the case of LS Range, the standard deviations are calculated from the covariance matrix output by the least-squares routine. While the results for four of the parameters are similar, the spread of the standard deviation for the lower balance point is much smaller in WattScale compared to LS Range. Thus, WattScale provides an equivalent or tighter bound compared to LS Range. Summary: Fixed parameters provide poor estimates of the building parameters. WattScale provides lower error and tighter parameter estimates compared to the baseline techniques.
Faulty Homes Validation
We now examine the accuracy of our model in reporting homes with likely faults. We ran our algorithm on all homes in the New England dataset to generate a list of outlier homes for each of the parameters and then compared our results with findings from manual energy audits (ground truth). Since the manual audit reports contain faults related to the building envelope and HVAC devices only, we only report these results; inefficiencies arising from base energy usage and faulty set points were not analyzed. To determine the accuracy, we compare an inefficient building's parameters to the audit report conducted in the past and verify whether it has any building faults. The audit reports were manually compiled by an expert on-field auditor identifying and suggesting energy efficiency improvement measures. We find that WattScale reported 59 homes with building envelope faults, out of which 56 buildings were in the audit report, an accuracy of 95%. Moreover, we find that 46 of the 56 homes with building envelope faults also had faulty HVAC systems. Summary: WattScale identified parameter-related faults in buildings with high accuracy. In particular, our approach correctly identified 95% of the homes that were flagged by expert auditors as having either a faulty building envelope or a faulty HVAC system.
CASE STUDY: IDENTIFYING INEFFICIENT HOMES IN A CITY
We conduct a case study on the New England dataset to determine the least efficient residential buildings in the city using the individual execution mode. In particular, we seek to gain insights on the following questions: (i) What percentage of the homes are energy inefficient? (ii) Which groups of homes are the most energy inefficient?
(iii) What are the most common causes of energy inefficiency? We first provide a brief analysis of the distribution of the energy split.
Energy Split Distribution Analysis
To get the fixed proportions of the energy split, we use the mean of the posterior estimates to compute the disaggregated energy usage, i.e., the heating, cooling, and base load components. To compare the energy components, we compute the Energy Usage Intensity (EUI) by normalizing each energy component by the building's built area. Figure 8(a) shows the heating, cooling, base load, and total EUI distributions grouped by property type across all homes. The figure shows that the base load is the highest component of energy usage in most Mixed Use and Apartment property types, followed by heating and cooling. However, for Single Family homes, the heating cost is usually higher. The high base load can be attributed to lighting, water heating, and other appliances. Further, since the New England region has more winter days, homes require more heating and are thus expected to have a higher heating energy footprint than cooling. In particular, the average heating energy required is almost 20× the average cooling energy. We also observe that the normalized total energy usage of Single and Multi Family homes is the highest, presumably due to a larger number of appliances. The median EUI of Single Family homes is ≈53 kBtu/sq.ft. (1 kWh = 3.412 kBtu), which is almost twice that of Apartment homes (≈26.8 kBtu/sq.ft.). Observation: Heating energy consumption is 20× that of cooling energy on average. Energy consumption among Single and Multi Family homes is much higher than among Apartment or Mixed Use homes.
Efficiency Analysis
In this section, we analyze the results of our approach on the utility company's dataset described earlier. We created peer groups to identify inefficient homes within their respective cohorts. To do so, we used three building attributes (property type, age, and area), which created 120 peer groups in total. Among these peer groups, we discarded groups with fewer than 20 homes, as they did not have a large enough population for a meaningful analysis. In all, 67 peer groups containing a total of 186 homes were discarded. Below, we present our analysis of the remaining 9,921 homes.
6.2.1 Identifying inefficient homes. We examine the number of homes that are flagged as inefficient for each of the energy components using our approach. Table 4 shows the summary of inefficient homes across all peer groups. We note that a home may have multiple inefficiencies, such as inefficient heating and a high base load, and thus may be inefficient in several of the energy components. Our results show that the overall percentage of inefficient homes across all residential homes is 50.25%. Further, almost 62.25% of all inefficient homes have either an inefficient heater or a poor building envelope, and 4,144 homes have either inefficient heating or cooling. Observation: More than half of the buildings in our dataset are likely to be energy inefficient, of which almost 62.25% have inefficient heating as a probable cause.
Identifying faults in inefficient homes. We now analyze the causes of inefficiency in these inefficient homes. Figure 8(b) shows the percentage of inefficient homes within each building age group across all faults. Note that a home may have multiple faults. We observe that building envelope faults are the major cause of inefficiency, followed by inefficiencies in heaters and other base load appliances.
Across all age groups, nearly 41% of the homes have building envelope faults, while 23.73% and 0.51% of homes have heating and cooling system faults, respectively. The figure also shows that some homes may have set point faults. In particular, 18.06% of the homes have issues with either a high heating or a low cooling set point temperature. These faults indicate likely issues with thermostat settings, and adjusting the thermostat set point temperature in these homes may improve their efficiency. As shown, homes built or altered before 1945 have a higher proportion of inefficient homes; however, the percentage difference with the other age groups is <15%. Figure 8(c) shows the percentage of inefficient homes within each building property type and fault. We observe that building envelope faults are the most common faults across all building types. Further, we find that, except for HVAC appliance related faults, the Mixed Use property type has the highest percentage of inefficiency in the remaining fault categories. After the Mixed Use property type, Apartments tend to have a higher percentage of inefficient homes, followed by the Multi Family and Single Family property types. Observation: Building envelope faults are one of the major causes of inefficiency and are present in nearly 41% of homes. However, 18.06% of homes have thermostat set point faults; changing their set point may improve efficiency in these homes.
Neighborhood Analysis. We plot inefficient homes spatially to observe whether they are clustered together. To anonymize the data, we partition the map into 5,166 grid cells of size 100×100 meters. Further, we bucketize all homes into these grid cells and report the percentage of inefficient homes within each of them. (Note that the number of outliers found will vary with geographic region, prevalent building codes, building age, property type, etc. We are not making any claims regarding the generality of the final results of this analysis across geographies. However, the analysis itself is quite general and can be applied to data from any part of the world, as the main categories of building faults do not change from region to region.) Figure 9 shows a heat map of the percentage of inefficient homes in each grid cell. The light colored patches are areas with few or no inefficient buildings, while the darker colored areas reveal a higher proportion of inefficient buildings. As seen in the figure, most inefficient homes are co-located. In particular, we find that just 100 grid cells (≈1 sq. km.) out of the overall 51.66 sq. km. area contain more than 50% of all inefficient homes. Observation: Most inefficient homes are co-located. In particular, 50% of all inefficient homes lie within a 1 sq. km. area.
We summarize the results in Table 4. In percentage terms, 33.33% of the homes among the Mixed Use peer groups are inefficient, while in the case of the Single Family peer groups, the fraction of inefficient homes is only 12.74%. However, in absolute terms, the Single Family property type has the highest number of inefficient homes (575 homes), followed by Apartments (558 homes). Since most of the Apartment homes belong to the older age group, i.e., buildings built before 1945, these groups are likely candidate targets. We also observe that some age groups had few outliers, which can be attributed to having fewer homes in those groups. Observation: Newer homes are more energy efficient than older ones. Homes built before 1945 represent ≈72% of the total outliers.
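A brief sketch of the neighborhood binning described in the analysis above: homes are assigned to 100 m × 100 m grid cells and the share of inefficient homes is computed per cell. The assumption of projected coordinates in meters and the column names are illustrative.

```python
import pandas as pd

def inefficiency_heatmap(homes: pd.DataFrame, cell_m: float = 100.0) -> pd.DataFrame:
    """homes: columns x_m, y_m (projected coordinates in meters) and
    inefficient (bool). Returns the percentage of inefficient homes per grid cell."""
    cells = homes.assign(
        gx=(homes["x_m"] // cell_m).astype(int),
        gy=(homes["y_m"] // cell_m).astype(int),
    )
    grouped = cells.groupby(["gx", "gy"])["inefficient"]
    summary = pd.DataFrame({
        "total_homes": grouped.size(),
        "inefficient_pct": 100.0 * grouped.mean(),  # mean of a boolean = fraction
    }).reset_index()
    return summary
```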
CASE STUDY: IDENTIFYING INEFFICIENT HOMES ANYWHERE IN THE US
We present another case study, on the Dataport (Boulder) dataset, to validate the energy efficiency results from our scalable region-based execution mode against the results obtained from the individual execution mode. To get the distribution of building parameters for a region, we use the publicly available Building Performance Database (BPD). BPD is the United States' largest dataset containing energy-related information on commercial and residential buildings.
Energy Split Distribution Analysis
Once again, to get the fixed proportions of the energy components, we use the mean of the posterior estimates of the building model parameters. Figure 11 shows the heating, cooling, and base load EUI distributions across all 32 homes. In this dataset, the baseload energy component dominates the energy consumed for heating and cooling. The average daily energy consumed to run non-HVAC appliances is almost 3.88× and 15.53× the average heating and cooling energy, respectively.
Energy Efficiency Analysis
We now compare the efficacy of the region-based mode of WattScale with the individual mode used to flag inefficient homes. The individual mode needs energy consumption data from several homes in the same peer group, whereas in the region-based mode, we compute the building model parameters of all the homes in the region to identify the causes of inefficiency. Figure 11 shows the baseload outliers identified by the two modes. As shown, both modes discover 6 homes with excessive baseload energy usage. Five of the six homes flagged by the two modes are common, pointing to significant agreement between them. Home IDs 7 and 16 were each identified by only one of the two modes. Figure 12 shows the heating slope outliers identified by the two modes. Here, the region-based approach did not flag a single home, whereas the individual mode detected 5 out of the 32 homes as having a higher heating slope. This discrepancy exists because in the region-based mode we compare the heating slope distribution of each home with the distribution learned from the homes in BPD, a highly representative set of residential buildings in any region of the US. On the contrary, the homes in the Dataport (Boulder) dataset consist of energy-conscious households that have undergone energy audits. Observation: WattScale provides two execution modes to flag inefficient homes. The region-based mode provides comparable performance to the individual mode, provided the dataset used to compute the model parameter distributions for the region comes from a representative set of homes.
DISCUSSION
With the increasing penetration of smart meters, building energy usage data is easily accessible for a wide population of consumers. At the same time, weather and real-estate data have never been more readily accessible for major parts of the world. Since WattScale uses coarse-grained daily and annual energy consumption to create distributions for a building and a region, respectively, we see enormous potential in applying our data-driven approach to various energy-efficiency related analytics. We note that the distribution of building parameters for a region can be computed easily, as several utilities provide typical load profiles for the different sizes of residential homes they serve. In this section, we briefly describe how our approach can benefit various stakeholders in improving the overall energy efficiency of buildings.
Utility Companies: As discussed earlier, our approach helps identify inefficient buildings within a cohort. This information, when combined with geospatial data, can reveal inefficient neighborhoods that can benefit from utility-scale energy awareness drives. Such energy awareness campaigns can foster better customer engagement and also improve the overall energy efficiency of the locality. Further, based on the likely faults identified, special evidence-based policies can be designed to target inefficient groups and maximize their impact.
Policymakers and Government entities: In the US, rebates and incentives are provided at both the federal and state levels. Policymakers can assess the impact of various subsidies and how they will affect overall energy consumption. When combined with other information, such as census data, subsidies can be targeted to economically disadvantaged households. These households can benefit from government subsidies to not only improve their overall energy efficiency but also save money.
Researchers: Since our approach can be used beyond the city scale for different regions, researchers can use our system to study the impact of pre- and post-retrofit modifications in a home and perform randomized tests (A/B testing). Our tool can be used to create control groups based on various factors, such as year built, area, and fuel type, that affect the efficiency of a building. For example, if a county has incorporated a new energy policy for providing rebates/subsidies, we can assess the impact of the policy by comparing it to other counties. Our tool can also be used for longitudinal studies that record several measurements over multiple years. We can create a building model for an individual home across several years and carefully study the impact of retrofits and renovations over time. Further, we can study the impact of any energy policy in which the household participates.
Homeowners: Our approach can provide custom recommendations to homeowners that best help reduce their energy footprint. When combined with geolocation data, homeowners can compare their efficiency to any region, including nearby neighborhoods. Such personalized energy reports can encourage consumers to take energy efficiency measures to reduce their footprint and energy costs.
FUTURE WORK
We now discuss WattScale's strengths, limitations, and future directions for our work. One of the strengths of WattScale is its applicability to large parts of the world. Since smart meters are being extensively deployed around the world, our approach can be used by the tens of millions of homes that collect energy data. Further, our data-driven approach reduces the need for a full manual energy audit in homes. By identifying potential faults in homes, only a partial audit may suffice, thereby freeing resources and providing cost benefits. In order for WattScale to provide an accurate analysis, it requires energy consumption data at a daily granularity. However, the building model can be modified to work with monthly energy bills, which are more widely available, especially in homes that do not have smart meters installed, although the accuracy of the building parameter estimates may be lower in comparison to models built using daily energy data. To overcome this limitation, one can use energy data across multiple years, which remains a part of our future work. The approach detailed in this work relies on comparing building parameters among similar homes.
Currently, WattScale only looks at the following building attributes: (i) building age, (ii) size, and (iii) property type. In the New England dataset, we observed that building age and property type are proxies for several low-level features, i.e., the style of the building, flooring type, roof type, etc. We believe this is due to buildings adhering to the prevalent building codes of their time. However, one could additionally use satellite data to augment our analysis. For example, we could learn whether a home has a swimming pool, which may require heating and a water pump and thus increases its energy usage. Such homes could then be compared to other homes with swimming pools for a fair energy efficiency evaluation. We believe this is an interesting line of research and deserves more attention to gain new insights. Similarly, as part of future work, we intend to also utilize occupancy patterns as a building attribute while creating the cohorts. For example, homes occupied 24/7 should form a separate cohort. In the future, WattScale can also be enhanced to track energy savings and quantify the effectiveness of retrofits in homes. Moreover, while our current work looks at residential buildings, it can also be extended to identify inefficiencies in commercial buildings. Additionally, analyzing seasonal (especially weekly) changes could yield insights into energy usage patterns for different households. Such an analysis could provide feedback to homeowners interested in knowing more about their energy consumption profile. For example, the energy data may reveal higher HVAC usage on Sundays, when homeowners are outdoors, thereby encouraging homeowners to set thermostat schedules.
RELATED WORK
Diagnosing and reducing energy consumption in buildings is an important problem. Various methods have been proposed to detect abnormal energy consumption in a building. However, these methods focus on commercial buildings, require expensive building management systems, or require costly instrumentation with sensors for monitoring purposes. Sensors allow fine-grained monitoring of energy usage but are not scalable due to high installation costs. Unlike prior approaches, our model does not require building management systems or costly instrumentation and uses ubiquitous smart meter data to determine energy inefficiency in buildings. Prior work has also proposed automatic modeling of residential loads. Studies have shown that compound loads can be disaggregated into basic load patterns. Separately, there have been studies on non-intrusive load monitoring (NILM), which allows disaggregation of a household's total energy into its contributing appliances and does not require building instrumentation. However, most NILM techniques require fine-grained datasets for training purposes and assume that energy consumption patterns are similar across homes. In contrast, our approach makes no such assumption on energy consumption patterns and is applicable across multiple homes, as it uses coarse-grained energy usage data that are readily available from utility companies. Various energy performance assessment methods exist to quantify energy use in buildings and identify energy inefficiency. A common approach is to use the degree-days method, a linear regression model, for calculating building energy consumption. However, these approaches do not consider the uncertainties associated with indicators of building performance. The idea of modeling uncertainties in thermal comfort has been studied in prior work.
However, that work is restricted to a single office building with cooling and heating systems. Unlike previous studies, our approach can be used to identify the least energy efficient homes at scale without manual expert intervention. More recently, AI-based approaches have gained significant popularity in the energy and sustainability literature. Wang et al. present a detailed review of AI-based models for energy usage in buildings. In our case, we propose a novel Bayesian model that has better interpretability, as it accounts for uncertainties arising from human factors. Finally, we use actual ground truth data to validate our approach and show its efficacy on large-scale, city-wide data.
CONCLUSIONS
Improving the efficiency of buildings is an important problem, and the first step is to identify inefficient buildings. In this paper, we proposed WattScale, a data-driven approach to identify the least energy efficient homes in a city or region. We also implemented our approach as an open source tool, which we used to evaluate datasets from different geographical locations. We validated our approach on ground truth data and showed that our model correctly identified 95% of the homes with inefficiencies. Our case study on a city-scale dataset using the individual execution mode showed that more than half of the buildings in our dataset are energy inefficient in one way or another, of which almost 62.25% have heating-related inefficiencies as a probable cause. This shows that a large number of buildings can benefit from energy efficiency improvements. Further, WattScale provides a region-based execution mode that allows energy efficiency analysis of millions of homes in the US using publicly available datasets. As part of future work, we intend to deliver the individual inefficiency reports generated by our web application to the respective homeowners. These nudges can be used to motivate and incentivize homeowners towards energy efficiency measures.
def parse_error_register(self, error_register):
    """Decode a Standard Event Status Register (ESR) reply of the form 'ESR<value>'.

    The value is parsed as a hexadecimal number and each IEEE 488.2 status bit
    is tested with a bitwise mask. Bit 0 (Operation Complete) is not an error
    condition and is not reported.
    """
    err_code = int(error_register.replace('ESR', ''), 16)
    flags = [
        (128, 'Power on'),
        (64, 'User request'),
        (32, 'Command error'),
        (16, 'Execution error'),
        (8, 'Device dependent error'),
        (4, 'Query error'),
        (2, 'Request control'),
    ]
    errors = []
    for mask, message in flags:
        if err_code & mask:  # bitwise test of each status bit
            errors.append(message)
    return errors
Solar cells convert the sun's energy into useful electrical energy by way of the photovoltaic effect. Modern multijunction solar cells operate at efficiencies significantly higher than traditional, silicon solar cells, with the added advantage of being lightweight. Therefore, solar cells provide a reliable, lightweight and sustainable source of electrical energy suitable for a variety of terrestrial and space applications. A solar cell typically includes a semiconductor material having a certain energy bandgap. Photons in sunlight having energy greater than the bandgap of the semiconductor material are absorbed by the semiconductor material, thereby freeing electrons within the semiconductor material. The freed electrons diffuse through the semiconductor material and flow through a circuit as an electric current. Electron-hole recombination at the rear surface of a solar cell results in a loss of efficiency. Therefore, solar cells are typically provided with a back surface field layer positioned proximate the rear surface of the solar cell. The back surface field layer serves as a barrier to minority carrier flow toward the rear surface (i.e., toward the tunnel junction or the rear electrode). Therefore, the back surface field layer generally prevents the minority carrier from recombining at the back interface or surface, or escaping out of the base, of the solar cell, thereby passivating the base back interface or surface and acting as a minority carrier barrier of the solar cell. Unfortunately, it is becoming increasingly difficult to find higher bandgap material to use as the back surface field layer, particularly for high bandgap solar cells, such as AlGaInP solar cells. Accordingly, those skilled in the art continue with research and development efforts in the field of solar cells.
/**
 * Entry about an accepted message.
 */
public class Acceptance implements Serializable {

    public long viewNumber;
    public Serializable message;
    public long msgId;

    public Acceptance(long viewNumber, Serializable message, long msgId) {
        this.viewNumber = viewNumber;
        this.message = message;
        this.msgId = msgId;
    }
}
Q: When did Haymitch organize the effort to get Katniss out of the 75th Hunger Games? How did Haymitch do it? He couldn't have done it via phone, as no doubt the phones would be tapped. It seems like there was little time to organize the rebellion during the few days in the Capitol before they were sent to the arena. And in particular the coordination with District 13 seems quite difficult. How did he do it? I should add that it mentions at the end of the book that it was coordinated from the moment the Quell was announced, across many districts and the Capitol. Seems difficult to manage, given limited travel and no un-tapped channels of communication...
A: I think it's implied, though never explicitly stated, that the rebellion is older and has been building longer than the Quarter Quell. The Victory Tour and the Quarter Quell were in my view the catalysts that brought forward the rebellion, rather than creating it. Many of the pieces were in my opinion already in place. This might explain why Plutarch Heavensbee volunteered for a job that few wanted - he was biding his time for an opportunity. As for Haymitch managing to organise all that he did? He's clever, remember, as well as being well-connected with his friends among the former Games winners. He won his Games with a trick, having intuitively worked out how to use the arena against his opponent. And he's spent a lifetime as the bumbling mentor for District 12. The drinking was a coping mechanism, but in my mind it also became a cloak, a disguise to deflect any suspicion that he may be capable of organising anything of consequence.
package durianhln.polymorph.desktop;

import com.badlogic.gdx.backends.lwjgl.LwjglApplication;
import com.badlogic.gdx.backends.lwjgl.LwjglApplicationConfiguration;

import durianhln.polymorph.Polymorph;

public class DesktopLauncher {
    public static void main(String[] arg) {
        LwjglApplicationConfiguration config = new LwjglApplicationConfiguration();
        config.title = "Polymorph";
        config.width = 504;
        config.height = 896;
        new LwjglApplication(new Polymorph(), config);
    }
}
package practice.io.employee_catalog.serialazible;

import java.io.*;
import java.util.ArrayList;
import java.util.List;

public class EmployeeManager implements Serializable {

    private static final String path = "files/employeeS.bin";

    public static void main(String[] args) {
        EmployeeManager manager = new EmployeeManager();
        manager.saveOrUpdate(new Employee("igor2356", 24, 10, "worker"));
        manager.saveOrUpdate(new Employee("igor23", 24, 10, "worker"));
        manager.saveOrUpdate(new Employee("igor346", 11, 100, "worker"));
        manager.delete("igor");
        manager.saveOrUpdate(new Employee("igor767", 23, 2000, "worker"));
        manager.show(manager.readObjects());
    }

    public EmployeeManager() {
        // Create the storage file with a valid object-stream header so that the
        // first read does not fail with an EOFException on a missing header.
        if (!new File(path).exists()) {
            try (ObjectOutputStream out = new ObjectOutputStream(new FileOutputStream(path))) {
                // Writing nothing: the stream header alone is enough.
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }

    public void saveOrUpdate(Employee e) {
        List<Employee> list = readObjects();
        boolean updated = false;
        for (int i = 0; i < list.size(); i++) {
            if (list.get(i).equals(e)) {
                list.set(i, e); // update the existing entry
                System.out.println("inside: " + e);
                updated = true;
            }
        }
        if (!updated) {
            list.add(e); // otherwise append as a new entry
        }
        try {
            writeObjects(list);
        } catch (IOException e1) {
            e1.printStackTrace();
        }
    }

    public void delete(String name) {
        List<Employee> list = readObjects();
        list.removeIf(employee -> employee.getName().equals(name));
        try {
            writeObjects(list);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    public Employee get(String name) {
        for (Employee employee : readObjects()) {
            if (employee.getName().equals(name)) {
                return employee;
            }
        }
        return null;
    }

    public List<Employee> getByJob(String job) {
        List<Employee> result = new ArrayList<>();
        for (Employee employee : readObjects()) {
            if (employee.getJob().equals(job)) {
                result.add(employee);
            }
        }
        return result;
    }

    private List<Employee> readObjects() {
        List<Employee> list = new ArrayList<>();
        try (ObjectInputStream in = new ObjectInputStream(new FileInputStream(path))) {
            while (true) {
                list.add((Employee) in.readObject()); // EOFException ends the loop
            }
        } catch (EOFException e) {
            // Reached the end of the stored objects.
        } catch (IOException | ClassNotFoundException e) {
            e.printStackTrace();
        }
        return list;
    }

    public void show(List<Employee> list) {
        for (Employee e : list) {
            System.out.println(e);
        }
    }

    private void writeObjects(List<Employee> listE) throws IOException {
        try (ObjectOutputStream out = new ObjectOutputStream(new FileOutputStream(path))) {
            for (Employee e : listE) {
                out.writeObject(e);
            }
        }
    }

    private static class AppendingObjectOutputStream extends ObjectOutputStream {
        public AppendingObjectOutputStream(OutputStream out) throws IOException {
            super(out);
        }

        @Override
        protected void writeStreamHeader() throws IOException {
            reset();
        }
    }
}
The prevalence of group B streptococcus colonization in Iranian pregnant women and its subsequent outcome. Background: Group B streptococcus colonization in pregnant women usually has no symptoms, but it is one of the major factors of newborn infection in developed countries. In Iran, there is a little information about the prevalence of maternal colonization and newborns infected by group B streptococcus. In order to find the necessary information to create a protocol for prevention and treatment of group B streptococcus infection in newborns, we conducted a study of its prevalence among Iranian pregnant women and its vertical transmission to their newborns. Materials and Methods: This is a cross-sectional descriptive and analytic study performed at Prenatal Care Clinic of the Sarem Hospital from 2009 to 2011. The pregnant women with the gestational age of 35-37 weeks were enrolled in the study. The vaginal culture for group B streptococcus was done for 980 mothers based on our protocol. Among 980 mothers, 48 were shown positive vaginal culture; however, 8 cases among these 48 mothers were positive for both vaginal and urine culture. Babies with mothers showing positive vaginal culture were screened for infection using complete blood count /blood culture (B/C) and C-reactive protein (CRP). Then, a complete sepsis workup was performed for babies with any signs of infection in the first 48 hours after birth, and they received antibiotic therapy if necessary. All collected data were analyzed (SPSS version 15). Results: Among 980 pregnant women with vaginal culture, 48 cases had positive group B streptococcus cultures among which 8 mothers also had positive group B streptococcus urine culture. Our findings revealed that 22 (50%) symptomatic neonates were born from the mothers with positive vaginal culture for group B streptococcus. About 28 of them (63%) had absolute neutrophil count more than normal, and 4 (9.1 %) newborns were omitted from the study. Therefore, 50% of neonates showed clinical feature, whereas para-clinical test was required to detect the infection for the rest of neonates who showed no signs or symptoms. Conclusion: The colonization of group B streptococcus in Iranian women is significant, while 50% of newborns from mother with positive vaginal culture were symptomatic after birth; therefore, screening of newborns for group B streptococcus infection is recommended to become a routine practice in all healthcare centers in Iran. Introduction In the recent decade, Group B Streptococcus (GBS) has been one of the common causes of the early onset of sepsis among the newborns, which leads to high rate of morbidity and mortality. The incidence of early onset GBS disease is from 1.3 to 3.7 per 10000 live births. In addition, GBS is one of the main causes of infection in pregnant women with chorioamnionitis, endometritis, genitourinary tract and surgical wound infection. Genital infection is responsible for almost one-third of preterm deliveries, and GBS produce protease activity resulting to cervical ripening. Most women infected by GBS are asymptomatic, and the organism can be found from their throat, vagina and rectum. According to a report by World Health Organization (WHO), the prevalence of GBS colonization in pregnant women is about 5-40% in different countries. Among infected women, 50% showed GBS colonization in their vagina, while the rest revealed infection in their rectum and throat. 
However, the prevalence of colonization differs based on the age, parity, race, concurrent vaginal yeast colonization, genetic-ethnic factors, socio-economical status, pork consumption and recent sexual intercourse. GBS colonization of the maternal genital tract is related to early onset neonatal sepsis, as a result of vertical transmission before or during labor. The rate of vertical transmission of GBS between mothers and their offspring is about 29-85% (mean=51%). This transmission to some extent depends on factors including the severity of maternal colonization in birth canal. The rate of GBS infection in the newborn of colonized mother who has not received antibiotic during delivery is one out of 200, and in cases of receiving antibiotic, it is one out of 4000. In the presence of other predisposing factors like prematurity, maternal fever, premature rupture of membranes (PROM) more than 18 hours, low birth weight and multi parity, the infection rate increases. In the USA, the two major prevention strategies for GBS disease include the screening method and the risk-based approach. Pregnant women carrying GBS are offered to take intrapartum antibiotic prophylaxis. The Centers for Diseases Control (CDC) recommended GBS screening for all pregnant women be-tween 35 and 37 weeks of pregnancy, as well as taking intrapartum antibiotic prophylaxis. Pregnant women with unknown GBS status should be treated with antibiotic at the time of delivery. However, this protocol is not being performed completely in many countries including Iran. The mortality rate of early onset sepsis has estimated about 50%. Furthermore, early onset GBS sepsis leads to a severe neonatal condition, which may result to serious neurological damage. In our country, there is not enough information about maternal colonization and newborn infection with GBS. However, few investigations have been performed. For instance, Fatemi et al. have reported GBS maternal colonization prevalence is about 26.7% among 544 pregnant women in the city of Hamedan, Iran. So, we conducted a study of GBS prevalence among Iranian pregnant women and its vertical transmission to their newborns. Materials and Methods This is a cross-sectional descriptive and analytic study performed at Prenatal Care Clinic of the Sarem Hospital in Tehran, Iran in 2011. Vaginal cultures were performed for 980 pregnant women with gestational age of 35-37 weeks. Briefly, two sterile swabs from vagina were taken by a gynecologist and were sent for smear test and culture to the lab. The first swab was used for preparing direct smear and gram staining to detect bacteria, epithelial cells and the number of white blood cells (WBCs). The second swab was cultured for GBS on blood agar, Neisseria on chocolate agar, Gram-negative organism on eosin-methylene blue (EMB) agar and Candida on dextrose agar. Smear was obtained from hemolytic colonies on the blood agar. The catalase test was performed on Gram-positive cocci and positive cyclic adenosine mono phosphate (CAMP) colonies. According to our neonatal intensive care unit (NICU) protocol, complete blood count (CBC), Creactive protein (CRP) and blood count /blood culture (B/C) tests were done for all infants born from mothers with positive history of GBS vaginal colonization (by caesarian section or normal vaginal delivery). 
If there was any predisposing factor, like premature rupture of membranes (PROM) >18 hours, chorioamnionitis, maternal fever, taking antibiotic during labor, symptomatic newborn, ANC >15000 as a para-clinical infectious predictor, CRP >10 or positive B/C, complete sepsis workup and antibiotics therapy for infants were started. Newborn with Apgar score <7, meconium aspiration, major anomalies, low birth weight (LBW, <2500 gr), or born from mother with preeclampsia or vaginal bleeding were excluded from our study (4 out of 48). The results were analyzed via SPSS by Chi square, and Fisher's exact test. Significance level was set at 0.05. The study was approved by The Review Board of Tehran University of Medical Sciences (TUMS) Prenatal Department and all participants gave written informed consent. Discussion The overall prevalence for GBS colonization in different countries is reported 5-40% depending on the different regions of the world. For example, Grimwood et al. from New Zealand have reported 22%, while Joachim et al. have reported the prevalence of 23% for GBS colonization. Barcaite has shown that the prevalence of GBS colonization in 21 European countries is about 6.5-36%. Multiple evidences have shown that the prevalence of GBS colonization is different in each region; for instance, it was reported as 27.6% in Portugal, 4.7% in India and 20% in Taiwan. In our study, the rate of vaginal colonization was 4.9% which is less than many countries. We only took sample from vagina, but in other studies, the cultures were obtained from vagina, rectum and sometimes throat. In addition, it may relate to other contributory factors, such as the occurrence of colonization in the time interval between culture and delivery, specimen collection, false negative culture due to inadequate swab technique or poor handling, specimen storage conditions, and prolonged transport. Some reports show different positive culture rates in the different culture media. Therefore, our results showed that the rate of vaginal colonization in Iran is approximately the same as the other countries or even more. However, more studies are required to determine the specific rate of vaginal colonization. Although none of mothers in our study had predisposing factor such as PROM, and all of them received antibiotics according to anti-biogram during labor, a newborn in our study had positive B/C. According to the Center for Disease Control (CDC), among 400 newborns in danger of GBS whose mothers has received antibiotic, one newborn showed GBS infection, but in our study, about 50% of the newborns had clinical symptoms which might be due to the severity of maternal colonization and the type of GBS species. The overall vertical transmission rates of GBS colonization in newborns were reported between 6.4% and 28.4%, while the most studies have indicated colonization rates between 8 and 34.5%. A study by Joachim et al. have showed that 10% of infants who were born from GBS positive mothers were infected with GBS. Kuhn et al. reported that 0.75 out of 1000 live birth had GBS sepsis, which is similar to our study (1 out of 1000). However, another investigation showed that approximately, 1% of the prevalence of sepsis belonged to neonates born from women infected with GBS. The causes for differences in prevalence of GBS are identified as follows: density of GBS colonies, intrapartum antibiotic therapy, mode of delivery, and number and time of sampling. 
Most infections in these newborns occur within the first week of life, especially within the first 24 hours, and sepsis was most common symptoms followed by UTI and pneumonia. Early bacterial infections develop neutropenia. Neutropenia also was common in our symptomatic neonates due to lack of physiology reserve and immunodeficiency in preterm infants. Conclusion Just like other countries, the maternal colonization with GBS is a common problem in Iran. The rate of GBS infection in Iranian newborns is also like the other countries. In order to obtain more information, we recommend screening for GBS in all pregnant women and a close observation for all their newborns. It would be efficient to perform screening studies by repeating culture in pregnant women according to microorganism specific enriched media for detecting the GBS species. Early detection results to early treatment by proper antibiotic for newborn infected by GBS. We found that neonates born from women with positive vaginal cultures were more symptomatic than others, so our results suggest that early therapeutic intervention during labor and after birth would be beneficial. However, a sensible long-term plan in order to develop an effective vaccine and its routine usage in healthcare centers would be a real triumph.
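The Materials and Methods above note that associations were tested in SPSS with the chi-square and Fisher's exact tests at a significance level of 0.05. As an illustration only (the 2x2 counts below are hypothetical and are not the study's data), an equivalent comparison can be run in Python with scipy:

# Hypothetical 2x2 table: rows = maternal GBS vaginal culture (positive / negative),
# columns = symptomatic / asymptomatic newborns. Counts are illustrative only.
from scipy.stats import chi2_contingency, fisher_exact

table = [[22, 22],    # culture-positive mothers
         [10, 926]]   # culture-negative mothers (made-up counts)

chi2, p_chi2, dof, expected = chi2_contingency(table)
odds_ratio, p_fisher = fisher_exact(table)

print(f"chi-square p = {p_chi2:.4f}, Fisher exact p = {p_fisher:.4f}")
print("significant at alpha = 0.05" if p_fisher < 0.05 else "not significant at alpha = 0.05")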
/*
 * Copyright 2017 <NAME>.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package com.gs.tablasco.verify;

import com.gs.tablasco.VerifiableTable;
import org.junit.Assert;
import org.junit.Test;

import java.util.Arrays;
import java.util.Collections;
import java.util.LinkedHashSet;
import java.util.List;

public class ListVerifiableTableTest {

    @Test
    public void testHeaderTypes() throws Exception {
        // old usages prior to allowing String headers
        List<Object> headersAsObjects = Collections.singletonList("Col");
        List<List<Object>> headersAndDataAsObjects =
                Arrays.asList(headersAsObjects, Collections.<Object>singletonList("Val"));
        Assert.assertEquals("Test constructor with headers and rows in one List<List<Object>>",
                1, new ListVerifiableTable(headersAndDataAsObjects).getRowCount());
        Assert.assertEquals("Test constructor with headers as List<Object>",
                2, new ListVerifiableTable(headersAsObjects, headersAndDataAsObjects).getRowCount());
        List<String> headersAsStrings = Collections.singletonList("Col");
        Assert.assertEquals("Test cast that used to be necessary for headers as List<String>",
                2, new ListVerifiableTable((List) headersAsStrings, headersAndDataAsObjects).getRowCount());
        // allow passing string headers
        Assert.assertEquals("Test headers as List<String> can now be passed in as-is",
                2, new ListVerifiableTable(headersAsStrings, headersAndDataAsObjects).getRowCount());
    }

    @Test
    public void createList() {
        VerifiableTable table = ListVerifiableTable.create(
                Arrays.asList(Arrays.asList("A", "B"), Arrays.asList(1, 2)));
        Assert.assertEquals(2, table.getColumnCount());
        Assert.assertEquals(1, table.getRowCount());
        Assert.assertEquals("A", table.getColumnName(0));
        Assert.assertEquals("B", table.getColumnName(1));
        Assert.assertEquals(1, table.getValueAt(0, 0));
        Assert.assertEquals(2, table.getValueAt(0, 1));
    }

    @Test(expected = IllegalArgumentException.class)
    public void createList_headersNotStrings() {
        ListVerifiableTable.create(Arrays.asList(Arrays.asList('A', 'B'), Arrays.asList(1, 2)));
    }

    @Test(expected = IllegalArgumentException.class)
    public void createList_wrongRowSize() {
        ListVerifiableTable.create(Arrays.asList(
                Arrays.asList("A", "B"), Arrays.asList(1, 2), Collections.singletonList(3)));
    }

    @Test
    public void createHeadersAndList() {
        VerifiableTable table = ListVerifiableTable.create(
                Arrays.asList("A", "B"), Collections.singletonList(Arrays.asList(1, 2)));
        Assert.assertEquals(2, table.getColumnCount());
        Assert.assertEquals(1, table.getRowCount());
        Assert.assertEquals("A", table.getColumnName(0));
        Assert.assertEquals("B", table.getColumnName(1));
        Assert.assertEquals(1, table.getValueAt(0, 0));
        Assert.assertEquals(2, table.getValueAt(0, 1));
    }

    @Test(expected = IllegalArgumentException.class)
    public void createHeadersAndList_wrongRowSize() {
        ListVerifiableTable.create(Arrays.asList("A", "B"),
                Arrays.asList(Arrays.asList(1, 2), Arrays.asList(3, 4, 5)));
    }

    @Test
    public void createHeadersAndIterable() {
        VerifiableTable table = ListVerifiableTable.create(
                Arrays.asList("A", "B"), Collections.singleton(Arrays.asList(1, 2)));
        Assert.assertEquals(2, table.getColumnCount());
        Assert.assertEquals(1, table.getRowCount());
        Assert.assertEquals("A", table.getColumnName(0));
        Assert.assertEquals("B", table.getColumnName(1));
        Assert.assertEquals(1, table.getValueAt(0, 0));
        Assert.assertEquals(2, table.getValueAt(0, 1));
    }

    @Test(expected = IllegalArgumentException.class)
    public void createHeadersAndIterable_wrongRowSize() {
        ListVerifiableTable.create(Arrays.asList("A", "B"),
                new LinkedHashSet<>(Arrays.asList(Arrays.asList(1, 2), Arrays.asList(3, 4, 5))));
    }
}
Cyclic fibre texture in hot extruded Ni50Mn29Ga21 Abstract The cyclic texture in polycrystalline 5M Ni50Mn29Ga21 magnetic shape-memory alloy fabricated by hot extrusion was investigated with high-energy synchrotron radiation, neutron diffraction, and electron backscatter diffraction. Combination of these techniques reveals that the texture of the hot extruded sample is quite complex. It is composed of components related to the radial direction and rotated around the extrusion axis. Additionally, the dominant texture components change from the centre to the edge of the rod. The recrystallized grains contain many twins with the trace of the twin boundaries preferentially aligned along the extrusion and radial direction showing the cyclic nature of the texture and microstructure. The results are discussed with respect to deformation mode, phase transformations, starting grain structure, and texture.
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/common/content_security_policy/csp_context.h"

namespace content {

namespace {

// Helper function that returns true if |policy| should be checked under
// |check_csp_disposition|.
bool ShouldCheckPolicy(const ContentSecurityPolicy& policy,
                       CSPContext::CheckCSPDisposition check_csp_disposition) {
  switch (check_csp_disposition) {
    case CSPContext::CHECK_REPORT_ONLY_CSP:
      return policy.header.type == blink::kWebContentSecurityPolicyTypeReport;
    case CSPContext::CHECK_ENFORCED_CSP:
      return policy.header.type == blink::kWebContentSecurityPolicyTypeEnforce;
    case CSPContext::CHECK_ALL_CSP:
      return true;
  }
  NOTREACHED();
  return true;
}

}  // namespace

CSPContext::CSPContext() {}

CSPContext::~CSPContext() {}

bool CSPContext::IsAllowedByCsp(CSPDirective::Name directive_name,
                                const GURL& url,
                                bool is_redirect,
                                const SourceLocation& source_location,
                                CheckCSPDisposition check_csp_disposition) {
  if (SchemeShouldBypassCSP(url.scheme_piece()))
    return true;

  bool allow = true;
  for (const auto& policy : policies_) {
    if (ShouldCheckPolicy(policy, check_csp_disposition)) {
      allow &= ContentSecurityPolicy::Allow(policy, directive_name, url,
                                            is_redirect, this, source_location);
    }
  }
  return allow;
}

bool CSPContext::ShouldModifyRequestUrlForCsp(
    const GURL& url,
    bool is_subresource_or_form_submission,
    GURL* new_url) {
  for (const auto& policy : policies_) {
    if (url.scheme() == "http" &&
        ContentSecurityPolicy::ShouldUpgradeInsecureRequest(policy) &&
        is_subresource_or_form_submission) {
      *new_url = url;
      GURL::Replacements replacements;
      replacements.SetSchemeStr("https");
      if (url.port() == "80")
        replacements.SetPortStr("443");
      *new_url = new_url->ReplaceComponents(replacements);
      return true;
    }
  }
  return false;
}

void CSPContext::SetSelf(const url::Origin origin) {
  self_source_.reset();

  // When the origin is unique, no URL should match with 'self'. That's why
  // |self_source_| stays undefined here.
  if (origin.unique())
    return;

  if (origin.scheme() == url::kFileScheme) {
    self_source_ = CSPSource(url::kFileScheme, "", false, url::PORT_UNSPECIFIED,
                             false, "");
    return;
  }

  self_source_ = CSPSource(
      origin.scheme(), origin.host(), false,
      origin.port() == 0 ? url::PORT_UNSPECIFIED : origin.port(), false, "");

  DCHECK_NE("", self_source_->scheme);
}

bool CSPContext::SchemeShouldBypassCSP(const base::StringPiece& scheme) {
  return false;
}

void CSPContext::SanitizeDataForUseInCspViolation(
    bool is_redirect,
    CSPDirective::Name directive,
    GURL* blocked_url,
    SourceLocation* source_location) const {
  return;
}

void CSPContext::ReportContentSecurityPolicyViolation(
    const CSPViolationParams& violation_params) {
  return;
}

CSPViolationParams::CSPViolationParams() = default;

CSPViolationParams::CSPViolationParams(
    const std::string& directive,
    const std::string& effective_directive,
    const std::string& console_message,
    const GURL& blocked_url,
    const std::vector<std::string>& report_endpoints,
    const std::string& header,
    const blink::WebContentSecurityPolicyType& disposition,
    bool after_redirect,
    const SourceLocation& source_location)
    : directive(directive),
      effective_directive(effective_directive),
      console_message(console_message),
      blocked_url(blocked_url),
      report_endpoints(report_endpoints),
      header(header),
      disposition(disposition),
      after_redirect(after_redirect),
      source_location(source_location) {}

CSPViolationParams::CSPViolationParams(const CSPViolationParams& other) = default;

CSPViolationParams::~CSPViolationParams() {}

}  // namespace content
import requests import os import threading YOUR_API_KEY = 'a7a4ce2007b42d838d0e91b28c2a53d964d2706d' TIMEOUT = 10 # minutes class Upload: def upload(self, name): r = requests.get('https://send-anywhere.com/web/v1/device', params={'profile_name': 'kicker_camera', 'api_key': YOUR_API_KEY}) device_key = r.json().get('device_key') cookies = {'device_key': device_key} size = os.path.getsize(name) print("INFO: video file size is: " + str(size)) params = {'file': [{'name': name, 'size': size}]} r = requests.post('https://send-anywhere.com/web/v1/key', json=params, cookies=cookies).json() upload_link = r.get('weblink') key = r.get('key') print('INFO: Upload link is: ' + upload_link) print('INFO: SendAnywhere key is: ' + key) threading.Thread( target=self._p2p_transfer, args=(name, upload_link)).start() r = requests.get('https://send-anywhere.com/web/v1/key/' + key, cookies=cookies).json() download_link = r.get('weblink') print('INFO: Download link is: ' + download_link) return download_link def _p2p_transfer(self, name, weblink): print("INFO: Started P2P transfer. Waiting for receiver.") try: x = requests.post(weblink, files={'file': open(name, 'rb')}, timeout=TIMEOUT * 60) if x.status_code == 200: print("INFO: P2P transfer complete! Video successfully transferred.") else: print("ERROR: P2P transfer failed. This may be due to the 10 minute limit for SendAnywhere.") except requests.Timeout: print("ERROR: P2P transfer reached timeout of " + str(TIMEOUT) + " minutes. (Timeout)") except requests.ConnectionError: print("ERROR: P2P transfer reached timeout of " + str(TIMEOUT) + " minutes. (ConnectionError)") if os.path.exists(name): os.remove(name) if __name__ == "__main__": Upload().upload('output.mp4')
Sn-2.5Ag-0.7Cu-0.1Re-xNi Lead-Free Solder Alloy and its Creep Properties of Solder Joints The effects of Ni on the properties of the Sn-2.5Ag-0.7Cu-0.1Re solder alloy and its creep properties of solder joints are researched. The results show that with adding 0.05wt% Ni in the Sn-2.5Ag-0.7Cu-0.1Re solder alloy, the elongation can be sharply improved without decreasing its tensile strength and it is 1.4 times higher than that of the commercial Sn-3.8Ag-0.7Cu solder alloy. Accordingly the creep rupture life of Sn-2.5Ag-0.7Cu-0.1Re-0.05Ni solder joints is the longest, which is 13.3 times longer than that of Sn-2.5Ag-0.7Cu-0.1Re and is also longer than that of the commercial Sn-3.8Ag-0.7Cu solder alloy. In the same environmental conditions, the creep rupture life of Sn-2.5Ag-0.7Cu-0.1Re-0.05Ni solder joints can sharply decrease with increasing the temperature and stress.
Bloc says it supports Libya's hosting of the trial of Saif al-Islam, son of the former Libyan leader, in defiance of an ICC request. The Arab League has said it supports Libya in its desire to try Saif al-Islam, son of the slain Libyan leader Muammar Gaddafi, on its own territory. "The Arab League supports the Libyan position to hold the trial of Saif al-Islam Gaddafi in front of a Libyan justice tribunal," the pan-Arab organisation said in a statement on Monday. "The Libyan government has repeatedly assured that all conditions ... would be met to organise a fair and impartial trial on its territory." Saif was arrested in November in southern Libya and is now held in the town of Zintan, 180km southwest of the capital, Tripoli, by former rebels who fought his father's forces during last year's conflict. On April 6, Libya filed an appeal against the International Criminal Court's request for the immediate transfer of Saif, who is wanted by the court for crimes against humanity. The ICC, located in The Hague in the Netherlands, has rejected the Libyan appeal.
# Assumes `read_lines` and `resolve_types` are helpers defined elsewhere in the project,
# and that this function is intended to be used as a classmethod (note the `cls` parameter).
from pathlib import Path
from typing import Type, TypeVar

import yaml

T = TypeVar("T")


def load(cls: Type[T], path: Path) -> T:
    # Read the raw lines and join them back into a single YAML document.
    stream = "\n".join(read_lines(path))
    data = yaml.full_load(stream)
    # Coerce the parsed mapping onto the field types declared on `cls`,
    # then construct the instance from the resolved fields.
    fields = resolve_types(cls, data)
    return cls(**fields)
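For context, here is a minimal, hypothetical usage sketch of the loader above. It assumes read_lines simply returns the file's lines and resolve_types coerces the parsed YAML values to the class's annotated field types; neither helper, nor the Config class and file name, appears in the original code.

# Hypothetical sketch: Config, config.yaml and the two helper implementations are assumptions.
from dataclasses import dataclass
from pathlib import Path


@dataclass
class Config:
    name: str
    retries: int


def read_lines(path: Path) -> list:
    # Assumed behaviour: return the file's lines without trailing newlines.
    return path.read_text().splitlines()


def resolve_types(cls, data: dict) -> dict:
    # Assumed behaviour: coerce each parsed value to the annotated field type.
    return {key: cls.__annotations__[key](value) for key, value in data.items()}


if __name__ == "__main__":
    cfg_path = Path("config.yaml")
    cfg_path.write_text("name: demo\nretries: '3'\n")
    config = load(Config, cfg_path)   # uses the load() defined above
    print(config)                     # Config(name='demo', retries=3)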
package com.gempukku.swccgo.cards.set11.dark;

import com.gempukku.swccgo.cards.AbstractNormalEffect;
import com.gempukku.swccgo.cards.GameConditions;
import com.gempukku.swccgo.cards.conditions.OnTableCondition;
import com.gempukku.swccgo.cards.effects.usage.OncePerTurnEffect;
import com.gempukku.swccgo.cards.evaluators.NegativeEvaluator;
import com.gempukku.swccgo.cards.evaluators.StackedEvaluator;
import com.gempukku.swccgo.common.*;
import com.gempukku.swccgo.filters.Filter;
import com.gempukku.swccgo.filters.Filters;
import com.gempukku.swccgo.game.PhysicalCard;
import com.gempukku.swccgo.game.SwccgGame;
import com.gempukku.swccgo.logic.GameUtils;
import com.gempukku.swccgo.logic.actions.TopLevelGameTextAction;
import com.gempukku.swccgo.logic.conditions.Condition;
import com.gempukku.swccgo.logic.effects.CancelCardOnTableEffect;
import com.gempukku.swccgo.logic.effects.LoseForceEffect;
import com.gempukku.swccgo.logic.effects.choose.StackCardFromHandEffect;
import com.gempukku.swccgo.logic.evaluators.Evaluator;
import com.gempukku.swccgo.logic.modifiers.CancelsGameTextModifier;
import com.gempukku.swccgo.logic.modifiers.EachBattleDestinyModifier;
import com.gempukku.swccgo.logic.modifiers.Modifier;

import java.util.Collections;
import java.util.LinkedList;
import java.util.List;

/**
 * Set: Tatooine
 * Type: Effect
 * Title: You Want This, Don't You?
 */
public class Card11_078 extends AbstractNormalEffect {
    public Card11_078() {
        super(Side.DARK, 3, PlayCardZoneOption.YOUR_SIDE_OF_TABLE, "You Want This, Don't You?", Uniqueness.UNIQUE);
        setLore("'I can feel the hate swelling in you now...'");
        setGameText("Deploy on table. Once per turn may place a card from hand here (lost if Effect canceled). " +
                "If Luke's Lightsaber on table, its gametext is canceled and opponent's battle destiny draws are -X, " +
                "where X = number of cards here. Opponent may lose 2 Force to cancel Effect.");
        addIcons(Icon.TATOOINE);
        addImmuneToCardTitle(Title.Alter);
    }

    @Override
    protected List<TopLevelGameTextAction> getGameTextTopLevelActions(final String playerId, final SwccgGame game, final PhysicalCard self, int gameTextSourceCardId) {
        GameTextActionId gameTextActionId = GameTextActionId.OTHER_CARD_ACTION_1;

        // Check condition(s)
        if (GameConditions.isOncePerTurn(game, self, playerId, gameTextSourceCardId, gameTextActionId)
                && GameConditions.hasHand(game, playerId)) {

            final TopLevelGameTextAction action = new TopLevelGameTextAction(self, gameTextSourceCardId, gameTextActionId);
            action.setText("Stack card from hand");
            action.setActionMsg("Stack a card from hand on " + GameUtils.getCardLink(self));
            // Update usage limit(s)
            action.appendUsage(
                    new OncePerTurnEffect(action));
            // Perform result(s)
            action.appendEffect(
                    new StackCardFromHandEffect(action, playerId, self));
            // Return the built action (previously the singletonList result was discarded)
            return Collections.singletonList(action);
        }
        return null;
    }

    @Override
    protected List<Modifier> getGameTextWhileActiveInPlayModifiers(SwccgGame game, final PhysicalCard self) {
        String opponent = game.getOpponent(self.getOwner());
        Filter lukesLightsaber = Filters.Lukes_Lightsaber;
        Condition lukesLightsaberOnTable = new OnTableCondition(self, lukesLightsaber);
        Evaluator evaluator = new StackedEvaluator(self);

        List<Modifier> modifiers = new LinkedList<Modifier>();
        modifiers.add(new CancelsGameTextModifier(self, lukesLightsaber, lukesLightsaberOnTable));
        modifiers.add(new EachBattleDestinyModifier(self, lukesLightsaberOnTable, new NegativeEvaluator(evaluator), opponent));
        return modifiers;
    }

    @Override
    protected List<TopLevelGameTextAction> getOpponentsCardGameTextTopLevelActions(final String playerId, final SwccgGame game, final PhysicalCard self, int gameTextSourceCardId) {
        GameTextActionId gameTextActionId = GameTextActionId.OTHER_CARD_ACTION_2;

        // Check condition(s)
        if (GameConditions.canBeCanceled(game, self)) {

            final TopLevelGameTextAction action = new TopLevelGameTextAction(self, playerId, gameTextSourceCardId, gameTextActionId);
            action.setText("Lose 2 Force to cancel");
            action.setActionMsg("Lose 2 Force to cancel " + GameUtils.getCardLink(self));
            // Pay cost(s)
            action.appendCost(
                    new LoseForceEffect(action, playerId, 2, true));
            // Perform result(s)
            action.appendEffect(
                    new CancelCardOnTableEffect(action, self));
            return Collections.singletonList(action);
        }
        return null;
    }
}
def zero_pixels(self):
    """Return an (N, 2) array of (row, col) indices of pixels whose channel values sum to zero.

    Assumes `self.raw_data` is an H x W x C NumPy array and that `numpy` is imported as `np`.
    """
    zero_px = np.where(np.sum(self.raw_data, axis=2) == 0)
    zero_px = np.c_[zero_px[0], zero_px[1]]
    return zero_px
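As a standalone illustration (not part of the original class), the same zero-pixel computation can be exercised on a small synthetic image; the array shape and contents below are made up.

# Demonstrates the zero-pixel logic above on a made-up 3x4 RGB image.
import numpy as np

image = np.ones((3, 4, 3), dtype=np.uint8)  # all-ones image: no zero pixels yet
image[0, 1] = 0                             # zero out pixel (row 0, col 1)
image[2, 3] = 0                             # and pixel (row 2, col 3)

zero_px = np.where(np.sum(image, axis=2) == 0)
zero_px = np.c_[zero_px[0], zero_px[1]]
print(zero_px)  # [[0 1]
                #  [2 3]]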
package contexts import ( "context" "reflect" "testing" ) func TestSetContextOperation(t *testing.T) { type args struct { op Operation } tests := []struct { name string args args want PartialContextFn }{ // TODO: Add test cases. } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := SetContextOperation(tt.args.op); !reflect.DeepEqual(got, tt.want) { t.Errorf("SetContextOperation() = %v, want %v", got, tt.want) } }) } } func TestAddContextOperation(t *testing.T) { type args struct { op Operation } tests := []struct { name string args args want PartialContextFn }{ // TODO: Add test cases. } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := AddContextOperation(tt.args.op); !reflect.DeepEqual(got, tt.want) { t.Errorf("AddContextOperation() = %v, want %v", got, tt.want) } }) } } func TestGetContextOperations(t *testing.T) { type args struct { ctx context.Context } tests := []struct { name string args args want Operations }{ // TODO: Add test cases. } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := GetContextOperations(tt.args.ctx); !reflect.DeepEqual(got, tt.want) { t.Errorf("GetContextOperations() = %v, want %v", got, tt.want) } }) } }
CoExpression of Melatonin (MEL1a) Receptor and Arginine Vasopressin mRNAs in the Siberian Hamster Suprachiasmatic Nucleus Durational melatonin signals, cued by the photoperiod and generated by the pineal gland, are processed in the brain to induce seasonally appropriate physiological and behavioural adaptations. The melatonin receptor subtype MEL1a (also known as mt1) appears to regulate seasonal responses. Single label in situ hybridization for MEL1a receptor mRNA revealed labelled cells in several brain regions of Siberian hamsters, including the suprachiasmatic nucleus, the paraventricular nucleus of the thalamus, and the reuniens nucleus of the thalamus. To characterize suprachiasmatic nucleus cells containing MEL1a receptor mRNA, we used 35Slabelled cRNA probes for MEL1a receptor mRNA in combination with digoxigeninlabelled cRNA probes for vasopressin, somatostatin, or orphan retinoid Z receptor (RZR; a putative nuclear melatonin receptor). Cells in the suprachiasmatic nucleus that contained MEL1a receptor mRNA also contained mRNAs for vasopressin and RZR, but not for somatostatin. These data suggest that suprachiasmatic nucleus vasopressin cells may respond to melatonin signals, raising the possibility that suprachiasmatic nucleus vasopressin output mediates some of the effects of melatonin on seasonal or circadian responses.
package org.nuxeo.ecm.restapi.server.jaxrs.firstvoices;

import ca.firstvoices.rest.data.Statistics;
import ca.firstvoices.services.DialectStatisticsService;
import javax.servlet.http.HttpServletRequest;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import org.nuxeo.ecm.webengine.model.WebObject;
import org.nuxeo.ecm.webengine.model.impl.DefaultObject;
import org.nuxeo.runtime.api.Framework;

/*
 * Generates statistics about a particular dialect for all document types and several
 * temporal ranges. Uses aggregate queries for performance and sets caching headers
 * on the response.
 */
@WebObject(type = "statistics")
@Produces({MediaType.APPLICATION_JSON})
public class StatisticsObject extends DefaultObject {

  /**
   * @param request
   * @param siteId
   * @return
   */
  @GET
  @Path("generate")
  public Response generateStatistics(
      @Context HttpServletRequest request, @QueryParam(value = "siteId") String siteId) {

    DialectStatisticsService statisticsService =
        Framework.getService(DialectStatisticsService.class);

    try {
      final Statistics statistics =
          statisticsService.getStatisticsForDialect(siteId,
              DialectStatisticsService.VisibleTo.GUEST);
      final Response.ResponseBuilder responseBuilder = Response.ok(statistics);
      return responseBuilder.build();
    } catch (DialectStatisticsService.StatisticsNotAvailableException e) {
      return Response.status(503).build();
    }
  }
}
package junit.AssertJ;

import junit.Dog;
import org.assertj.core.api.AbstractAssert;

public class DogAssert extends AbstractAssert<DogAssert, Dog> {

    public DogAssert(Dog actual) {
        super(actual, DogAssert.class);
    }

    public static DogAssert assertThat(Dog actual) {
        return new DogAssert(actual);
    }

    public DogAssert withName(String name) {
        isNotNull();
        // Compare with equals() rather than reference equality (!=) so that equal
        // but distinct String instances are treated as a match.
        if (!name.equals(actual.getName())) {
            failWithMessage("Expected the dog's name to be <%s> but was <%s>", name, actual.getName());
        }
        return this;
    }
}
//reconcileTCPRules try reconcile difference between these two TCPConfig slices func (r *ReconcileBCSEgress) reconcileTCPRules(tcps, cacheTCPs []*TCPConfig) (bool, error) { isChanged := false if len(tcps) == 0 && len(cacheTCPs) == 0 { klog.V(3).Infof("empty BCSEgress http rules & proxy http cache rules.") return isChanged, nil } if len(tcps) != len(cacheTCPs) { isChanged = true } egressRules := make(map[string]*TCPConfig) cacheRules := make(map[string]*TCPConfig) for _, newtcprule := range tcps { egressRules[newtcprule.Key()] = newtcprule } for _, cachetcprule := range cacheTCPs { cacheRules[cachetcprule.Key()] = cachetcprule } for key, newrule := range egressRules { if oldrule, ok := cacheRules[key]; ok { delete(cacheRules, key) if !newrule.IsChanged(oldrule) { delete(egressRules, key) } else { isChanged = true } } else { isChanged = true } } for k, v := range egressRules { if err := r.proxy.UpdateTCPRule(v); err != nil { klog.Errorf("EgressController Update tcp rule %s in reconcile failed, %s. details: %+v", k, err.Error(), v) return isChanged, err } klog.V(5).Infof("EgressController update tcp rule %s in cache successfully", k) } for k, v := range cacheRules { if err := r.proxy.DeleteTCPRule(k); err != nil { klog.Errorf("EgressController delete tcp rule %s in reconcil failed, %s, details: %+v", k, err.Error(), v) return isChanged, err } klog.V(5).Infof("EgressController delete tcp rule %s in cache successfully", k) } return isChanged, nil }
NBA Vintage Stories Remembering The Rens Buy your copy here! The third edition of The Official NBA Encyclopedia has been released and can be ordered on NBA.com or purchased in major bookstores. The following article was written exclusively for NBA.com as a supplement to the book and as an example of the type of historical information presented in the new encyclopedia. By John Hareas This is the greatest basketball team you have never heard of, a team so dominant that in one season it won 112 games and lost only seven. It was a team that won championships despite never being officially accepted professionally or socially. Yet, encountering obstacles on and off the court wasn't anything new for the New York Renaissance, the first all-black professional basketball team. All the Rens did was win, and in the process they served as catalysts for social equality. "They were literally pioneers and recognized that they were making a statement in front of the audiences," said Richard Lapchick, director of Northeastern University's Center for the Study of the Sport in Society. "And there were some audiences that didn't like that statement." Or teams, for that matter. The Rens chose to let their on-court actions do the talking. In their nearly three-decade existence, beginning in 1922, the Rens compiled a 2,588-529 record. "To this day, I have never seen a team play better team basketball," said Hall of Fame coach John Wooden, who played against the Rens when he was a member of the barnstorming Indianapolis Kautskys during the '30s. "They had great athletes, but they weren't as impressive as their team play. The way they handled and passed the ball was just amazing to me then, and I believe it would be today." "To this day, I have never seen a team play better team basketball. ... The way they handled and passed the ball was just amazing to me then, and I believe it would be today." -- John Wooden The Rens were the creation of Bob Douglas, known as the "Father of Black Basketball" at the Naismith Memorial Basketball Hall of Fame. Douglas started earning that reputation at age 25, when he organized two amateur basketball teams in Harlem called the Spartan Braves and Spartan Hornets. The Braves and Hornets competed against other New York City-area teams, both black and white, from 1919 to 1923. Douglas eventually became disenchanted with amateur basketball when he wasn't allowed to keep players who had received money from playing other sports. It marked the beginning of the end for Douglas' association with amateur basketball and the beginning of a new era -- the birth of the Rens. In 1923, Douglas cut a deal with the owners of Harlem's Renaissance Casino, which opened in 1922. Douglas organized a group of black basketball players and agreed to call the team the Renaissance, providing the casino with publicity. In return, the casino allowed the team to practice and play home games at the epicenter of the "renaissance" of black artistic expression in Harlem, located at 137th Street and Seventh Avenue, during the '20s. The Rens were another form of that expression in "New York's Prettiest Dance Hall" -- as advertised in the New York Amsterdam News -- between dances and big bands, that is. "It was twofold: People came to see the team and came to dance," said John Isaacs, who played with the Rens from 1936-41 and roomed on the road with Hall of Famer Pop Gates, one of the nation's finest all-around players. "Once the game was over, people stayed. 
It was like, 'Let's go back to dancing.'" Whether they were home or on the road, the Rens maintained a hectic schedule throughout the year, often playing more than 120 games. "We played every day and twice on Sunday," said Jim Usry, a member of the Rens from 1946-51. "We played all over -- Hartford, New Haven, Springfield. We'd play a road game in the afternoon and play back in New York that night." The Rens took on all comers, playing against semipro, black college and other professional teams, including the premier team of that era, the Original Celtics. Featuring Dutch Dehnert, Nat Holman and Joe Lapchick, the Celtics were known as extraordinary passers and showmen who revolutionized the way basketball was played. The games featuring the Rens and Celtics were hot tickets, with some contests drawing as many as 15,000 fans. The games were hard fought and civil on the court, but off the court was a different story. "There were race riots that took place during five of their games," said Richard Lapchick, son of Celtics center Joe Lapchick. "But the players believed that they represented a game that was something special in their lives." The Rens of the '20s featured such players as Frank Forbes, Harold "Fat" Jenkins, Leon Monde, "Wee" William Smith and Hall of Famer Chuck "Tarzan" Cooper, one of the game's great centers. "People called my father the first great big man in basketball," said Richard Lapchick. "He said Cooper would play him one-on-one as absolute equals." Joe Lapchick would eventually become coach of the New York Knicks and was responsible for signing the first African-American to an NBA contract, Nat "Sweetwater" Clifton. It was his relationship with Bob Douglas that taught Lapchick about racism in the United States. "My father used to ask Bobby to go out for drinks all the time after games and Bobby would say, 'No, no, no,'" said Richard Lapchick. "Finally, he realized why Bobby was saying no when, in 1926, he and Bobby had a conversation and Bobby said, 'You've got to understand, Joe, the places that you want to take me to, I'm not welcome. And I don't want to go in there and face the icy stare of racist white men.'" Unfortunately, icy stares followed by harsher treatment were everyday occurrences for Douglas and the Rens, especially when they traveled. "Sometimes you would sit at a restaurant counter, leafing through the menu," said Isaacs, "and you didn't see the man coming from behind the counter. And he sees you and walks to the wall and grabs his rifle and says, 'Get out of here.' You didn't have any choice but to leave." The lack of acceptance extended to the American Basketball League, which refused to admit the Rens in 1925. As a show of support for the Rens, the Original Celtics refused to join the league. Later, when the ABL suspended operation in 1931 because of the Great Depression, the Rens enjoyed their greatest success as a team. In two seasons, the Rens defeated the Original Celtics for the world basketball championship and posted their best regular-season record in 1932-33, when they won 112 games while losing eight. That season, the Rens won 88 consecutive games, doubling the Celtics' record of 44. While the Celtics were the benchmark team throughout the '20s, the Rens enjoyed that distinction during the '30s, citing their competition against the Celtics as one of the reasons. "The Rens learned a lot from the Celtics," said Isaacs. "They played with their heads. 
And when we played other teams, we instituted a lot of their stuff -- playing smart basketball, setting each other up. "They were good teachers and, after a while, the student started taking it to the teacher. It didn't matter when we played them. We knew we could beat them because we were in better condition than they were. We could run longer, run faster, jump higher." The Rens also had a financial incentive when playing against the Celtics in the '30s. "Every time we beat the Celtics on the road, depending on where we played, Douglas would take a look at the house and we would get an extra $50 that night or $25," said Isaacs. The success of the Rens helped pave the way for another all-black basketball team to emerge, the Harlem Globetrotters. Formed by Abe Saperstein in 1927, the Trotters established themselves as the clown princes of basketball, dazzling fans with their ballhandling and passing skills and on-court comedic tricks. "It wasn't so much a rivalry because it was a different type of operation," said Isaacs. "Theirs was entertainment and ours was straight basketball. We didn't play them that often." When they did play, the games were memorable. The Rens defeated the Globetrotters 27-23 in the third round of the 1939 world professional basketball tournament in Chicago. The Rens would go on to win the tournament by defeating the Oshkosh All-Stars 34-25. After the championship victory, Douglas purchased jackets for the team celebrating the triumph. The jacket read: "N.Y. Rens Colored World Champions." When Isaacs saw the inscription, he asked to borrow a razor blade from Douglas and then proceeded to meticulously remove the word "Colored" from the jacket. Douglas responded, "You're ruining the jacket." "No, I just made it real," said Isaacs. The Trotters did get their revenge the next year in the tournament, courtesy of a last-second, midcourt heave by Duke Cumberland to defeat the Rens 37-36. Despite the overwhelming success on the court, professional leagues still wouldn't admit the Rens. As coach of the Knicks in 1946, Joe Lapchick drove to Philadelphia and met with the Basketball Association of America owners. He hoped to persuade the nine men to admit the Rens as a 10th member. The request was denied. As the '50s approached, the basketball landscape had changed. The BAA had merged with the NBL and the Globetrotters were one of the sport's top attractions, traveling all over the globe to play in front of sellout crowds. "The Rens were the best and most popular traveling team that there had ever been in basketball, up to the point when the Globetrotters really came of age during the Goose Tatum and Sweetwater Clifton era," said Marques Haynes, who began playing with the Trotters in 1946. Saperstein would assume controlling interest of the Rens in 1949 and used them as a secondary club, having them play in the preliminary game before the Trotters performed. However, the double bill was short-lived as the Rens ended up disbanding that year, leaving the Trotters as the only all-black basketball team. For the Rens, it was the end of nearly 30 years of groundbreaking achievement on and off the court. But the team had earned the right to be called one of the greatest -- if not the greatest -- pre-NBA teams. "I was raised hearing that the Celtics were the greatest team of all time," said Richard Lapchick. "My dad's friends would say that and all our neighbors would say that. 
But he would correct them and say, 'The Rens were every bit as good as we were in the beginning and were better than us in the end.'" John Hareas is a member of the NBA Editorial Department.
// DrawMyStuff: This function draws a series of strips made of simple // triangles. The first two vertices are selected, and then with each // additional vertex, a triangle is created. The color of the triangle // is determined from the altitude[][] matrix, and then the normals // are calculated. In order to handle the various manipulations to // the surface, various translations and rotations are applied. void DrawMyStuff() { int a,b; glLoadIdentity(); gluLookAt(MAX, MAX/2.0, 2*MAX, MAX/2.0, -1.0, MAX/2.0, 0.0, 1.0, 0.0); glTranslatef(MAX/2.0, 0.0, MAX/2.0 ); glRotatef(spin, 0.0, 1.0, 0.0); glRotatef(spin2, 1.0, 0.0, 0.0); glTranslatef(-MAX/2.0, 0.0, -MAX/2.0 ); glTranslatef(0.0, 0.0, -z ); for( a=1;a<MAX-1;a++) { glBegin(GL_TRIANGLE_STRIP); glColor3f(colors[a][0].rd,colors[a][0].gr,colors[a][0].bl); glVertex3f(a, altitude[a][0], 0 ); glVertex3f(a+1, altitude[a+1][0], 0); for( b=1;b<MAX-1;b++) { glColor3f(colors[a][b].rd,colors[a][b].gr,colors[a][b].bl); glVertex3f(a, altitude[a][b], b); trinormal(a,altitude[a][b-1],b-1, a,altitude[a][b],b, a+1,altitude[a+1][b-1],b-1); glVertex3f(a+1, altitude[a+1][b], b); trinormal(a,altitude[a][b],b, a+1,altitude[a+1][b],b, a,altitude[a][b-1],b-1); } glEnd(); } }
package io.atleon.rabbitmq; public interface BodySerializer<T> extends Configurable { SerializedBody serialize(T t); }
The 100th anniversary of the death of the most senior ranked soldier from the Corby borough to be killed in the First World War is to be marked this weekend. On Sunday (October 16) Corby Council will commemorate the death of Colonel George Eustace Ripley, who was commander of the 6th Battalion, Northamptonshire Regiment. Colonel Ripley and his wife had been living in Bury House, Cottingham, since 1902. George Eustace Ripley was born in Norfolk in 1864 and educated at Rugby School. He was a professional soldier, firstly with the Norfolk Regiment before moving to the Northamptonshire Regiment in 1884. He served in the South African War (1899-1902) and retired reluctantly from the Army a few months before the outbreak of the First World War. At the outbreak of hostilities he applied to be reinstated and was eventually given command of the 6th Battalion, Northamptonshire Regiment, in October 1914. Many local men were to serve in the 6th Battalion under Colonel Ripley. The Northamptonshire Regiment had been involved in a successful operation to take Trones Wood in July 1916. In a subsequent engagement at Thiepval in September 1916 tragedy was to strike. The following extract from The Northamptonshire Regiment 1914-1918 is reproduced by kind permission of The Naval & Military Press Ltd. “Many were the congratulatory messages showered on the 6th Battalion. But the victory was dearly bought. “Within three hours, 15 officers had fallen. At the finish, Major Charrington and two young subalterns were all that were left. “In other ranks, 32 were killed, 204 wounded and 35 were missing. After a rest and reorganisation, the battalion took part in the capture of Thiepval. “Here again the fighting was very severe, and the casualties heavier even than at Trones Wood. “The killed numbered 105 and in wounded and missing the casualties were 258. “Five officers were killed or died of wounds, and 9 wounded, and amongst the former was Colonel Ripley, who succumbed after the amputation of his right arm. “No officer could have held the affection and confidence of men in a higher degree. “Thereafter, the 6th remained in trenches in the Albert area, constantly under heavy fire, and spending a dreary winter in snow and frost.” The Colonel was evacuated back to England after sustaining his injury to convalesce, the amputation having been performed at a base hospital. Records show that Colonel George Eustace Ripley died at 10 Carlton House, Middlesex, on October 16, 1916. He was buried in Cottingham Churchyard. Following his death he was honoured with an inscribed memorial plaque in the Holy Sepulchre Church, Sheep Street, Northampton. This was unveiled by Lord Spencer. It reads: To the glory of God and to the memory of Colonel George Eustace Ripley Late commanding officer of the 6th Bttn Northamptonshire Regt Who died in London from wounds received in the attack On Thiepval, France 26th September 1916 aged 52 and was Buried in Cottingham in this county on October 19th 1916 This window and brass are Placed here by officers and men of the Northamptonshire Regiment with other friends as a Testimony to the high esteem in which this distinguished Officer was held for his ability, courage and the many Attractive personal gifts which adorned his long and honoured military career Further information on this soldier will be presented on the Cube Helpdesk, shared through Twitter, @CorbyBC, and can be found on Corby Borough Council’s website, www.corby.gov.uk as part of the First World War commemorations.
Methane trapped in Arctic ice (and elsewhere) could be rapidly released into the atmosphere as a result of global warming in a possible doomsday scenario for climate change, some scientists worry. After all, methane is 72 times more powerful as a greenhouse gas than carbon dioxide over a 20-year timescale. But research announced at the annual meeting of the American Geophysical Union this December suggests that marine microbes could at least partially defeat the methane "time bomb" sitting at the bottom of the world's oceans. The conventional wisdom for decades has been that methane emanating from the seafloor could be consumed by a special class of bacteria called methanotrophs. It has long been known, for instance, that these organisms at the bottom of the Black Sea consume methane produced in its deep oxygen-free waters. What has not been clear is whether these bacteria would be of any use in the event that a special class of ice at the bottom of the ocean is destabilized by a warmer climate. This ice, known as clathrates, or methane hydrates, consists of a cage of water molecules surrounding individual molecules of methane, and it exists under conditions of low temperature and high pressure. These conditions can be found on the continental shelf the world over, but there is an especially large area of seafloor suitable for methane hydrates in the Arctic because of its low temperatures and a seafloor plateau that happens to be at the optimum depth for clathrate formation. The Arctic also happens to be more vulnerable to climate change because parts of the poles are warming at least twice as fast as the rest of the world. To investigate this Arctic ice more carefully, Scott Elliott, a biogeochemist at Los Alamos National Laboratory, used the Coyote supercomputer to model the complex interplay of physical and biological systems that govern the fate of methane released from Arctic clathrates during the first few decades of projected future global warming. Elliott's model includes the activity of methanotrophs. In accordance with conventional wisdom, his virtual bacteria can keep up with small to medium-sized failures of the clathrates and subsequent releases of methane gas. As the "burps" of methane increase in size in response to warming seas, however, his model also shows that in some areas of the Arctic, the methanotrophs could potentially run out of the nutrients required to metabolize methane, including oxygen, nitrate, iron and copper. But even if the methanotrophs in the Arctic run out of the nutrients required to digest methane—especially if the waters in which they normally live become anoxic (low in the oxygen modern life-forms need to survive)—a second phenomenon demonstrated in Elliott's models may yet prevent methane from percolating all the way to the surface of the ocean, and then into the atmosphere. "It happens that the Arctic Ocean is capped with a relatively fresh layer of seawater," Elliott says. Freshwater from the many rivers that empty into the Arctic floats atop the denser ocean brine. In Elliott's simulations, methane hits this freshwater "cap" and cannot escape into the atmosphere. Instead, it "hangs out in the Arctic Ocean until it flows out into the deep, abyssal Atlantic Ocean," Elliott says. "The time constants in deep oceans are many hundreds of years—that's long enough for methanotrophs to consume all the methane. The model says that right now we have multiple layers of security."
Elliott cautions that there is a large degree of uncertainty in the results generated by his model, which is the first attempt ever made to incorporate the biological activity of methanotrophs into a regional climate model. Vincent Gauci, lecturer in Earth systems and ecosystem science at The Open University in England, agrees that the uncertainties in the model prevent it from being used to conclude whether or not the methane released from deep-sea clathrates will enter the atmosphere, especially in the event of a "catastrophic submarine slope failure," in which large volumes of clathrate spontaneously collapse and release their stored methane. "This is an outrageously complex problem," Elliott says. Dave Valentine, associate professor of microbial geochemistry at the University of California, Santa Barbara, who heard Elliott's talk, notes that paleoclimatologists have yet to definitively answer whether or not there is evidence that methane from clathrates ever reached the atmosphere in the past, which would support the conclusions of Elliott's model. In The Long Thaw: How Humans Are Changing the Next 100,000 Years of Earth's Climate (Princeton University Press, 2008), University of Chicago geophysicist David Archer notes that the "jury is still out" on what the role was, if any, of destabilizing methane hydrates in past global warming events and mass extinctions. Even if methane doesn't reach the atmosphere, Elliott's model suggests it could still have dire effects on the Arctic environment: As it is oxidized by methanotrophs, it will acidify the Arctic Ocean and turn the water into an anoxic "dead zone" analogous to the oxygen-free dead zones that show up in the Gulf of Mexico every year as a result of farmland fertilizer runoff carried by the Mississippi River. "It would mean those nutrients [oxygen] are not available to other organisms," Elliott says. "In other words, maybe we're safe, but other organisms are not." On a local level these changes would be equal to or even greater than the acidification of the ocean that is already occurring because of rising levels of atmospheric carbon dioxide. "It would be a very serious environmental issue—but regional, not global," Elliott says. Future iterations of Elliott's model will have to include a class of methane-digesting bacteria not included in its first version, says Rick Colwell, an Oregon State University marine microbiologist specializing in methanotrophs who attended a recent presentation by Elliott. These yet-to-be-modeled bacteria operate in anaerobic conditions that are usually found only in ocean sediments. If conventional, oxygen-dependent methanotrophs deplete the water column of oxygen, it could create conditions favorable for anaerobic methane-digesting bacteria to carry on the work of digesting the methane—flipping these parts of the ocean back to conditions that last prevailed 250 million years ago, during the most devastating mass extinction ever to befall life on Earth. University of Washington in Seattle paleontologist Peter Ward has hypothesized that this event, known as the Great Dying, was the result of runaway global warming that turned the majority of the world's oceans anoxic throughout their entire depths, leading to a large release of hydrogen sulfide gas, a by-product of the metabolism of anaerobic bacteria.
Elliott would not speculate on whether the phenomena he modeled could have been part of that event, which in Ward's hypothesis was most likely caused by a different source of carbon altogether: CO2 vented from massive volcanic eruptions in a region that is now part of Siberia. "You could refer to these [anaerobic methane-eating bacteria] as a 'biofilter'—they would consume some of the methane that is moving into the water," Colwell says. Already, he adds, these bacteria perform this role in anoxic environments like the depths of the Black Sea. Also, recent results from the Svalbard Islands north of Norway suggest that methane may not always rise through the water column in the way that Elliott's model assumes. In most models, including Elliott's, methanotrophs in the water were able to digest methane because it diffused into the water. Around Spitsbergen, however, 250 plumes rising from the bottom of the ocean included large bubbles that could ascend much higher up the water column before dispersing, increasing the danger that they could reach the atmosphere intact. Ultimately, Elliott says, he and his team cannot eliminate the possibility that methanotrophs in the Arctic could be overwhelmed by large burps of methane gas from clathrates. Valentine speculates that the limiting nutrient will be oxygen, but Elliott's model raises some other potentially interesting possibilities. When asked about fertilizing the Arctic Ocean with some of the missing nutrients, such as iron, to enhance the productivity of methanotrophs, Elliott speculates: "I would bet that someone will very soon discuss the potential for engineering this situation. This becomes an opportunity for the geoengineering types to become creative."
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include "tensorflow/lite/experimental/microfrontend/lib/log_scale.h" #include "tensorflow/lite/experimental/microfrontend/lib/bits.h" #include "tensorflow/lite/experimental/microfrontend/lib/log_lut.h" #define kuint16max 0x0000FFFF // The following functions implement integer logarithms of various sizes. The // approximation is calculated according to method described in // www.inti.gob.ar/electronicaeinformatica/instrumentacion/utic/ // publicaciones/SPL2007/Log10-spl07.pdf // It first calculates log2 of the input and then converts it to natural // logarithm. static uint32_t Log2FractionPart(const uint32_t x, const uint32_t log2x) { // Part 1 int32_t frac = x - (1LL << log2x); if (log2x < kLogScaleLog2) { frac <<= kLogScaleLog2 - log2x; } else { frac >>= log2x - kLogScaleLog2; } // Part 2 const uint32_t base_seg = frac >> (kLogScaleLog2 - kLogSegmentsLog2); const uint32_t seg_unit = (((uint32_t)1) << kLogScaleLog2) >> kLogSegmentsLog2; const int32_t c0 = kLogLut[base_seg]; const int32_t c1 = kLogLut[base_seg + 1]; const int32_t seg_base = seg_unit * base_seg; const int32_t rel_pos = ((c1 - c0) * (frac - seg_base)) >> kLogScaleLog2; return frac + c0 + rel_pos; } static uint32_t Log(const uint32_t x, const uint32_t scale_shift) { const uint32_t integer = MostSignificantBit32(x) - 1; const uint32_t fraction = Log2FractionPart(x, integer); const uint32_t log2 = (integer << kLogScaleLog2) + fraction; const uint32_t round = kLogScale / 2; const uint32_t loge = (((uint64_t)kLogCoeff) * log2 + round) >> kLogScaleLog2; // Finally scale to our output scale const uint32_t loge_scaled = ((loge << scale_shift) + round) >> kLogScaleLog2; return loge_scaled; } uint16_t* LogScaleApply(struct LogScaleState* state, uint32_t* signal, int signal_size, int correction_bits) { const int scale_shift = state->scale_shift; uint16_t* output = (uint16_t*)signal; uint16_t* ret = output; int i; for (i = 0; i < signal_size; ++i) { uint32_t value = *signal++; if (state->enable_log) { if (correction_bits < 0) { value >>= -correction_bits; } else { value <<= correction_bits; } if (value > 1) { value = Log(value, scale_shift); } else { value = 0; } } *output++ = (value < kuint16max) ? value : kuint16max; } return ret; }
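The same two-step idea used above, taking the integer part of log2 from the position of the most significant bit, approximating the fractional part, and then rescaling by ln 2 in fixed point, can be sketched compactly. The Java sketch below is a simplification for illustration only: it replaces the kLogLut interpolation with a plain linear term and is not a port of this file.

// Simplified fixed-point natural logarithm in Q16.16, illustrating the
// MSB + fractional-interpolation approach of LogScaleApply above.
public final class FixedPointLog {

    private static final int SHIFT = 16;          // Q16.16 fixed-point output
    private static final long LN2_Q16 = 45426L;   // round(ln(2) * 2^16)

    // Natural log of a positive integer (x >= 1), returned in Q16.16 fixed point.
    static long ln(long x) {
        int msb = 63 - Long.numberOfLeadingZeros(x);        // integer part of log2(x)
        // Fractional part of log2: (x - 2^msb) / 2^msb, approximated linearly.
        // The C code refines exactly this term with the kLogLut table.
        long frac = ((x - (1L << msb)) << SHIFT) >> msb;
        long log2Q16 = ((long) msb << SHIFT) + frac;         // log2(x) in Q16.16
        return (log2Q16 * LN2_Q16) >> SHIFT;                 // ln(x) in Q16.16
    }

    public static void main(String[] args) {
        // Prints approximately ln(1000) * 2^16; the linear interpolation
        // costs well under 1% accuracy compared with the true value.
        System.out.println(ln(1000));
    }
}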
package com.ak47.digiboard.model; import java.util.ArrayList; public class QuizListModel { private String quizName, quizDescription; private Boolean publishInfo; private ArrayList<QuestionListModel> questionsList; private String createdDateTime; public QuizListModel(String quizName, String quizDescription, Boolean publishInfo, ArrayList<QuestionListModel> questionsList, String createdDateTime) { this.quizName = quizName; this.quizDescription = quizDescription; this.publishInfo = publishInfo; this.questionsList = questionsList; this.createdDateTime = createdDateTime; } public QuizListModel() { } public String getCreatedDateTime() { return createdDateTime; } public void setCreatedDateTime(String createdDateTime) { this.createdDateTime = createdDateTime; } public String getQuizName() { return quizName; } public void setQuizName(String quizName) { this.quizName = quizName; } public String getQuizDescription() { return quizDescription; } public void setQuizDescription(String quizDescription) { this.quizDescription = quizDescription; } public Boolean getPublishInfo() { return publishInfo; } public void setPublishInfo(Boolean publishInfo) { this.publishInfo = publishInfo; } public ArrayList<QuestionListModel> getQuestionsList() { return questionsList; } public void setQuestionsList(ArrayList<QuestionListModel> questionsList) { this.questionsList = questionsList; } }
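A short usage sketch of the model above, with placeholder values, showing how a quiz entry is typically built through the all-arguments constructor and adjusted through its setters. It assumes the example class sits in the same package as the model.

package com.ak47.digiboard.model;

import java.util.ArrayList;

public class QuizListModelExample {
    public static void main(String[] args) {
        // Placeholder values for illustration only.
        ArrayList<QuestionListModel> questions = new ArrayList<>();

        QuizListModel quiz = new QuizListModel(
                "Unit 1 Revision",          // quizName
                "Covers chapters 1 to 3",   // quizDescription
                false,                      // publishInfo: start unpublished
                questions,                  // question list, still empty here
                "2020-05-11 10:15");        // createdDateTime stored as a plain string

        quiz.setPublishInfo(true);          // publish once questions have been added
        System.out.println(quiz.getQuizName() + " published: " + quiz.getPublishInfo());
    }
}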
//
// Created by andreas.eibler on 11.05.2020.
//
#pragma once

#include "../messages/avro.hh"

// Small facade around Avro message construction and binary (de)serialization.
class Serializer {
public:
    // Builds an empty avro::Message.
    static avro::Message create();

    // Encodes `source` into `buffer` (at most `capacity` bytes); the returned
    // size_t presumably reports the encoded length.
    static size_t binary_encode(const avro::Message source, const char* buffer, size_t capacity);

    // Decodes `size` bytes from `buffer` into `dst` and returns a reference to it.
    static avro::Message& binary_decode(const char* buffer, size_t size, avro::Message& dst);
};
It will be interesting to see what happens later today with the Billy Corgan - TNA situation, as Corgan's attorneys will be allowed to file a redacted version of their lawsuit to protect certain materials from being brought to public light; Corgan has claimed "sensitive" information could fall into the hands of the company's competitors. That redacted version will be available to the public sometime around 4pm EST today. It's possible that if there is going to be a settlement between the sides, it will be made before the documents are unsealed by the Chancery Court of Nashville. As noted, Corgan filed a motion last week to compel Dixie Carter-Salinas and Impact Ventures, LLC to "provide full and fair responses" after not being satisfied with previous responses. Dixie responded by saying Impact Ventures was not insolvent and has not been insolvent since August 11th, 2016, stating that they do not fit the state of Tennessee's definition of insolvency, meaning Impact Ventures' assets are of greater value than its debt. If TNA and the related defendants in the lawsuit do not admit to the claims made by Corgan and his lawyers, then they have to hand over all documents related to Impact Ventures being insolvent or unable to pay all of its debts in full as they came due from January 1st, 2016 to the present, all documents sent to or received from WWE or any of its representatives from January 1st, 2016 to the present, and all documents sent to or received from "any third party" regarding that third party's potential acquisition of Impact Ventures LLC, TNA Entertainment LLC or any of the assets owned by either entity. Corgan asked that the ownership of the TNA library be stated, that the nature and extent of "each member's" ownership interest be identified, that the company identify all of its current officers, directors and managers, that it identify all of its assets, and that the LLC produce financial statements. Corgan's attorneys were "reviewing and assessing" 17,000 pages of documents they received from TNA and the other defendants. Source: PWInsider
# aioweb/modules/email/__init__.py
from email.message import EmailMessage

import aiosmtplib

from aioweb.conf import settings


async def send_mail(app,
                    sender='%s service <%s>' % (settings.BRAND, settings.SERVER_EMAIL),
                    recipients=tuple(), subject='', body=''):
    """Send a plain-text email through the application's shared SMTP client."""
    if not len(recipients):
        return
    async with app.smtp as conn:
        msg = EmailMessage()
        msg['Subject'] = subject
        msg['From'] = sender
        msg['To'] = ', '.join(recipients)
        msg.set_content(body)
        return await conn.send_message(msg, sender, recipients, timeout=5)


async def setup(app):
    # Attach a shared SMTP client to the application instance at startup.
    setattr(app, 'smtp', aiosmtplib.SMTP(hostname=app.conf.get('email.host', 'localhost'),
                                         port=app.conf.get('email.port', 25),
                                         timeout=app.conf.get('email.timeout', 5),
                                         loop=app.loop))
Some years ago, travelling on the presidential plane of Hugo Chávez of Venezuela with a French friend from Le Monde Diplomatique, we were asked what we thought was happening in Europe. Was there any chance of a move to the left? We replied in the depressed and pessimistic tones typical of the early years of the 21st century. Neither in Britain nor France, nor anywhere in the eurozone, did we see much chance of a political breakthrough. Then maybe, said Chávez with a twinkle, we could come to your assistance, and he recalled the time in 1830 when revolutionary crowds in the streets of Paris had come out waving the cap of Simón Bolívar, the South American liberator from Venezuela who was to die at the end of that year. Fighting for liberty, Latin American style, was held up as the path for Europe to follow. At the time, I was encouraged but not persuaded by Chávez's optimism. Yet now I think that he was right; it was good to be reminded that Alexis Tsipras, the leader of Greece's radical left party, Syriza, had visited Caracas in 2007 and inquired about the future possibility of receiving cheap Venezuelan oil, much as Cuba and other Caribbean and Central America countries do. There was a brief moment when Ken Livingstone and Chávez conjured up an oil deal between London and Caracas which looked promising until it was rejected by Boris Johnson. More important than the prospect of cheap oil is the power of example. Chávez has been engaged since the turn of the century, even before, on a project that rejects the neoliberal economics that afflicts Europe and much of the western world. He has been opposed to the recipes of the World Bank and the International Monetary Fund, and has fought hard against the policies of privatisation that harmed the social and economic fabric of Latin America and with which the European Union is now threatening to destroy the economy of Greece. Chávez has renationalised the many industries, including oil and gas, that were privatised in the 1990s. The words and inspiration of Chávez have had an effect beyond Venezuela. They have encouraged Argentina to default on its debt; to reorganise its economy thereafter and to renationalise its oil industry. Chávez has helped Evo Morales of Bolivia to run its oil and gas industry for the benefit of the country rather than its foreign shareholders, and more recently to halt the robbery by Spain of the profits of its electricity company. Above all, he has shown the countries of Latin America that there is an alternative to the single neoliberal message that has been endlessly broadcast for decades, by governments and the media in hock to an outdated ideology. Now is the time for that alternative message to be heard further afield, to be listened to by voters in Europe. In Latin America, governments following an alternative strategy have been re-elected time and time again, suggesting that it is effective and popular. In Europe, governments of whatever hue that follow the standard neoliberal template seem to fall at the first fence, suggesting that the will of the people is not engaged. Chávez and his co-religionaries in the new "Bolivarian revolution" have called for "21st-century socialism", not a return to Soviet-style economics or the continuation of the mundane social democratic adaptation of capitalism, but, as the Ecuadorean president Rafael Correa has described it, the re-establishment of national planning by the state "for the development of the majority of the people". 
Greece has a wonderful chance to change the history of Europe and to throw the caps of Bolívar into the air, as once the Italian carbonari did in Paris all those years ago. Lord Byron, who planned to settle in Bolívar's Venezuela before sailing off to help liberate Greece, named his yacht Bolívar; he would certainly have been pleased with contemporary developments.
Peritonitis and splenitis caused by MAC infection in an immunosuppressed patient. Primary (hematogenous) peritonitis caused by nontuberculous mycobacteria is extremely rare in clinical practice. Most reported episodes of primary intra-abdominal infection are associated with M. tuberculosis and the development of granulomatous inflammation of the peritoneum that is visually similar to carcinomatosis. The vast majority of reports of peritonitis associated with nontuberculous mycobacteria are linked to chronic peritoneal dialysis or to foreign bodies in the abdominal cavity, where infection is introduced by direct contact through a dialysis catheter, a prosthesis or a gastric banding device. This article describes a clinical case of peritonitis and splenitis caused by M. avium with hematogenous spread of infection from a primary pulmonary focus in a young patient with immunosuppression. Diagnosing such peritonitis at the preoperative stage is extremely difficult because its symptoms resemble those of atypical appendicitis or infected ascites. The intraoperative picture likewise did not suggest a mycobacterial etiology, and the absence of a focal source of peritonitis made a thorough work-up necessary. Only a peritoneal biopsy and a complete laboratory examination of the exudate allowed us to verify the diagnosis, understand the pathogenetic mechanisms of the disease and begin timely etiotropic therapy.
Recombinant bovine respiratory syncytial virus with deletions of the G or SH genes: G and F proteins bind heparin. Bovine respiratory syncytial virus (BRSV) encodes three transmembrane envelope glycoproteins, namely the small hydrophobic (SH) protein, the attachment glycoprotein (G) and the fusion glycoprotein (F). The BRSV reverse genetics system has been used to generate viable recombinant BRSV lacking either the G gene or the SH gene or both genes. The deletion mutants were fully competent for multicycle growth in cell culture, proving that, of the BRSV glycoprotein genes, the SH and G genes are non-essential. Virus morphogenesis was not impaired by either of the deletions. The deletion mutants were used to study the role of the F glycoprotein and the contributions of SH and G with respect to virus attachment. Attachment mediated by the F protein alone could be blocked by soluble heparin, but not by chondroitin sulphate. Heparin affinity chromatography revealed that both the BRSV G and F glycoproteins have heparin-binding activity, with the affinity of the F glycoprotein being significantly lower than that of G. Therefore, the roles of the BRSV glycoproteins in virus attachment and receptor binding have to be reconsidered.
<filename>api/src/test/java/de/adesso/objectfieldcoverage/api/assertion/primitive/PrimitiveTypeUtilsTest.java package de.adesso.objectfieldcoverage.api.assertion.primitive; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; import org.mockito.Mock; import org.mockito.junit.jupiter.MockitoExtension; import spoon.reflect.code.CtExpression; import spoon.reflect.declaration.CtField; import spoon.reflect.factory.TypeFactory; import spoon.reflect.reference.CtTypeReference; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; import static org.mockito.BDDMockito.given; @ExtendWith(MockitoExtension.class) class PrimitiveTypeUtilsTest { @Test @SuppressWarnings("rawtypes") void isCandidateForBooleanTypeAssertionReturnsTrueWhenExpressionTypeIsPrimitiveBoolean(@Mock CtExpression expressionMock) { // given var booleanPrimitive = new TypeFactory().BOOLEAN_PRIMITIVE; given(expressionMock.getType()).willReturn(booleanPrimitive); // when var actualResult = PrimitiveTypeUtils.isCandidateForPrimitiveTypeAssertion(expressionMock); // then assertThat(actualResult).isTrue(); } @Test @SuppressWarnings("rawtypes") void isCandidateForBooleanTypeAssertionReturnsTrueWhenExpressionTypeIsWrapperBoolean(@Mock CtExpression expressionMock) { // given var booleanWrapper = new TypeFactory().BOOLEAN; given(expressionMock.getType()).willReturn(booleanWrapper); // when var actualResult = PrimitiveTypeUtils.isCandidateForPrimitiveTypeAssertion(expressionMock); // then assertThat(actualResult).isTrue(); } @Test @SuppressWarnings("rawtypes") void isCandidateForBooleanTypeAssertionReturnsFalseWhenExpressionTypeDoesNotMatch(@Mock CtExpression expressionMock, @Mock CtTypeReference typeRefMock) { // given given(expressionMock.getType()).willReturn(typeRefMock); // when var actualResult = PrimitiveTypeUtils.isCandidateForPrimitiveTypeAssertion(expressionMock); // then assertThat(actualResult).isFalse(); } @Test void isPrimitiveTypeFieldReturnsTrueWhenFieldTypeIsIntegerPrimitive(@Mock CtField<Integer> field) { // given var intPrimitiveTypeRef = new TypeFactory().INTEGER_PRIMITIVE; given(field.getType()).willReturn(intPrimitiveTypeRef); // when var actualResult = PrimitiveTypeUtils.isPrimitiveTypeField(field); // then assertThat(actualResult).isTrue(); } @Test void isPrimitiveTypeFieldReturnsTrueWhenFieldTypeIsIntegerWrapper(@Mock CtField<Integer> field) { // given var intPrimitiveTypeRef = new TypeFactory().INTEGER; given(field.getType()).willReturn(intPrimitiveTypeRef); // when var actualResult = PrimitiveTypeUtils.isPrimitiveTypeField(field); // then assertThat(actualResult).isTrue(); } @Test void isPrimitiveTypeFieldReturnsFalseWhenFieldTypeIsOtherReferenceType(@Mock CtField<String> field) { // given var intPrimitiveTypeRef = new TypeFactory().createReference(String.class); given(field.getType()).willReturn(intPrimitiveTypeRef); // when var actualResult = PrimitiveTypeUtils.isPrimitiveTypeField(field); // then assertThat(actualResult).isFalse(); } @Test void getPrimitiveTypeReferenceReturnsBooleanPrimitiveForBoolean() { // given var givenPrimitiveTypeName = "boolean"; var expectedTypeRef = new TypeFactory().BOOLEAN_PRIMITIVE; // when var actualTypeRef = PrimitiveTypeUtils.getPrimitiveTypeReference(givenPrimitiveTypeName); // then assertThat(actualTypeRef).isEqualTo(expectedTypeRef); } @Test void getPrimitiveTypeReferenceReturnsBytePrimitiveForByte() { // given var givenPrimitiveTypeName = "byte"; var expectedTypeRef = new 
TypeFactory().BYTE_PRIMITIVE; // when var actualTypeRef = PrimitiveTypeUtils.getPrimitiveTypeReference(givenPrimitiveTypeName); // then assertThat(actualTypeRef).isEqualTo(expectedTypeRef); } @Test void getPrimitiveTypeReferenceReturnsShortPrimitiveForShort() { // given var givenPrimitiveTypeName = "short"; var expectedTypeRef = new TypeFactory().SHORT_PRIMITIVE; // when var actualTypeRef = PrimitiveTypeUtils.getPrimitiveTypeReference(givenPrimitiveTypeName); // then assertThat(actualTypeRef).isEqualTo(expectedTypeRef); } @Test void getPrimitiveTypeReferenceReturnsIntPrimitiveForInt() { // given var givenPrimitiveTypeName = "int"; var expectedTypeRef = new TypeFactory().INTEGER_PRIMITIVE; // when var actualTypeRef = PrimitiveTypeUtils.getPrimitiveTypeReference(givenPrimitiveTypeName); // then assertThat(actualTypeRef).isEqualTo(expectedTypeRef); } @Test void getPrimitiveTypeReferenceReturnsLongPrimitiveForLong() { // given var givenPrimitiveTypeName = "long"; var expectedTypeRef = new TypeFactory().LONG_PRIMITIVE; // when var actualTypeRef = PrimitiveTypeUtils.getPrimitiveTypeReference(givenPrimitiveTypeName); // then assertThat(actualTypeRef).isEqualTo(expectedTypeRef); } @Test void getPrimitiveTypeReferenceReturnsCharPrimitiveForChar() { // given var givenPrimitiveTypeName = "char"; var expectedTypeRef = new TypeFactory().CHARACTER_PRIMITIVE; // when var actualTypeRef = PrimitiveTypeUtils.getPrimitiveTypeReference(givenPrimitiveTypeName); // then assertThat(actualTypeRef).isEqualTo(expectedTypeRef); } @Test void getPrimitiveTypeReferenceReturnsFloatPrimitiveForFloat() { // given var givenPrimitiveTypeName = "float"; var expectedTypeRef = new TypeFactory().FLOAT_PRIMITIVE; // when var actualTypeRef = PrimitiveTypeUtils.getPrimitiveTypeReference(givenPrimitiveTypeName); // then assertThat(actualTypeRef).isEqualTo(expectedTypeRef); } @Test void getPrimitiveTypeReferenceReturnsDoublePrimitiveForDouble() { // given var givenPrimitiveTypeName = "double"; var expectedTypeRef = new TypeFactory().DOUBLE_PRIMITIVE; // when var actualTypeRef = PrimitiveTypeUtils.getPrimitiveTypeReference(givenPrimitiveTypeName); // then assertThat(actualTypeRef).isEqualTo(expectedTypeRef); } @Test void getPrimitiveTypeReferenceThrowsExceptionForOtherString() { // given var givenPrimitiveTypeName = "unknown"; // when / then assertThatThrownBy(() -> PrimitiveTypeUtils.getPrimitiveTypeReference(givenPrimitiveTypeName)) .isInstanceOf(IllegalArgumentException.class) .hasMessage("'%s' is not a primitive type!", givenPrimitiveTypeName); } @Test void getPrimitiveTypeReturnsExpectedTypeRefForBoolean() { // given var typeFactory = new TypeFactory(); var givenTypeRef = typeFactory.BOOLEAN_PRIMITIVE; var expectedTypeRef = typeFactory.BOOLEAN_PRIMITIVE; // when / then assertGetPrimitiveType(givenTypeRef, expectedTypeRef); } @Test void getPrimitiveTypeReturnsExpectedTypeRefForBooleanWrapper() { // given var typeFactory = new TypeFactory(); var givenTypeRef = typeFactory.BOOLEAN; var expectedTypeRef = typeFactory.BOOLEAN_PRIMITIVE; // when / then assertGetPrimitiveType(givenTypeRef, expectedTypeRef); } @Test void getPrimitiveTypeReturnsExpectedTypeRefForByte() { // given var typeFactory = new TypeFactory(); var givenTypeRef = typeFactory.BYTE_PRIMITIVE; var expectedTypeRef = typeFactory.BYTE_PRIMITIVE; // when / then assertGetPrimitiveType(givenTypeRef, expectedTypeRef); } @Test void getPrimitiveTypeReturnsExpectedTypeRefForByteWrapper() { // given var typeFactory = new TypeFactory(); var givenTypeRef = typeFactory.BYTE; var 
expectedTypeRef = typeFactory.BYTE_PRIMITIVE; // when / then assertGetPrimitiveType(givenTypeRef, expectedTypeRef); } @Test void getPrimitiveTypeReturnsExpectedTypeRefForChar() { // given var typeFactory = new TypeFactory(); var givenTypeRef = typeFactory.CHARACTER_PRIMITIVE; var expectedTypeRef = typeFactory.CHARACTER_PRIMITIVE; // when / then assertGetPrimitiveType(givenTypeRef, expectedTypeRef); } @Test void getPrimitiveTypeReturnsExpectedTypeRefForCharWrapper() { // given var typeFactory = new TypeFactory(); var givenTypeRef = typeFactory.CHARACTER; var expectedTypeRef = typeFactory.CHARACTER_PRIMITIVE; // when / then assertGetPrimitiveType(givenTypeRef, expectedTypeRef); } @Test void getPrimitiveTypeReturnsExpectedTypeRefForShort() { // given var typeFactory = new TypeFactory(); var givenTypeRef = typeFactory.SHORT_PRIMITIVE; var expectedTypeRef = typeFactory.SHORT_PRIMITIVE; // when / then assertGetPrimitiveType(givenTypeRef, expectedTypeRef); } @Test void getPrimitiveTypeReturnsExpectedTypeRefForShortWrapper() { // given var typeFactory = new TypeFactory(); var givenTypeRef = typeFactory.SHORT; var expectedTypeRef = typeFactory.SHORT_PRIMITIVE; // when / then assertGetPrimitiveType(givenTypeRef, expectedTypeRef); } @Test void getPrimitiveTypeReturnsExpectedTypeRefForInt() { // given var typeFactory = new TypeFactory(); var givenTypeRef = typeFactory.INTEGER_PRIMITIVE; var expectedTypeRef = typeFactory.INTEGER_PRIMITIVE; // when / then assertGetPrimitiveType(givenTypeRef, expectedTypeRef); } @Test void getPrimitiveTypeReturnsExpectedTypeRefForIntWrapper() { // given var typeFactory = new TypeFactory(); var givenTypeRef = typeFactory.INTEGER; var expectedTypeRef = typeFactory.INTEGER_PRIMITIVE; // when / then assertGetPrimitiveType(givenTypeRef, expectedTypeRef); } @Test void getPrimitiveTypeReturnsExpectedTypeRefForLong() { // given var typeFactory = new TypeFactory(); var givenTypeRef = typeFactory.LONG_PRIMITIVE; var expectedTypeRef = typeFactory.LONG_PRIMITIVE; // when / then assertGetPrimitiveType(givenTypeRef, expectedTypeRef); } @Test void getPrimitiveTypeReturnsExpectedTypeRefForLongWrapper() { // given var typeFactory = new TypeFactory(); var givenTypeRef = typeFactory.LONG; var expectedTypeRef = typeFactory.LONG_PRIMITIVE; // when / then assertGetPrimitiveType(givenTypeRef, expectedTypeRef); } @Test void getPrimitiveTypeReturnsExpectedTypeRefForFloat() { // given var typeFactory = new TypeFactory(); var givenTypeRef = typeFactory.FLOAT_PRIMITIVE; var expectedTypeRef = typeFactory.FLOAT_PRIMITIVE; // when / then assertGetPrimitiveType(givenTypeRef, expectedTypeRef); } @Test void getPrimitiveTypeReturnsExpectedTypeRefForFloatWrapper() { // given var typeFactory = new TypeFactory(); var givenTypeRef = typeFactory.FLOAT; var expectedTypeRef = typeFactory.FLOAT_PRIMITIVE; // when / then assertGetPrimitiveType(givenTypeRef, expectedTypeRef); } @Test void getPrimitiveTypeReturnsExpectedTypeRefForDouble() { // given var typeFactory = new TypeFactory(); var givenTypeRef = typeFactory.DOUBLE_PRIMITIVE; var expectedTypeRef = typeFactory.DOUBLE_PRIMITIVE; // when / then assertGetPrimitiveType(givenTypeRef, expectedTypeRef); } @Test void getPrimitiveTypeReturnsExpectedTypeRefForDoubleWrapper() { // given var typeFactory = new TypeFactory(); var givenTypeRef = typeFactory.DOUBLE; var expectedTypeRef = typeFactory.DOUBLE_PRIMITIVE; // when / then assertGetPrimitiveType(givenTypeRef, expectedTypeRef); } @Test void getPrimitiveTypeReferenceThrowsExceptionWhenGivenTypeIsNotAPrimitiveType() { 
// given var givenTypeRef = new TypeFactory().createReference(String.class); // when / then assertThatThrownBy(() -> PrimitiveTypeUtils.getPrimitiveTypeReference(givenTypeRef)) .isInstanceOf(IllegalArgumentException.class) .hasMessage("The given type is not a primitive or wrapper type!"); } private void assertGetPrimitiveType(CtTypeReference<?> givenTypeRef, CtTypeReference<?> expectedResult) { // when var actualTypeRef = PrimitiveTypeUtils.getPrimitiveTypeReference(givenTypeRef); // then assertThat(actualTypeRef).isEqualTo(expectedResult); } }
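From the expectations encoded in these tests one can reconstruct a plausible shape for the utility under test. The sketch below is a hypothetical reconstruction, not the project's actual source; it leans on Spoon's CtTypeReference.unbox() and isPrimitive() to collapse wrapper types onto their primitive counterparts, and the exception messages are copied from the assertions above.

import spoon.reflect.factory.TypeFactory;
import spoon.reflect.reference.CtTypeReference;

// Hypothetical reconstruction of PrimitiveTypeUtils, inferred from the tests above.
public final class PrimitiveTypeUtilsSketch {

    private static final TypeFactory TYPES = new TypeFactory();

    // Maps a primitive type name ("boolean", "int", ...) to its Spoon type reference.
    public static CtTypeReference<?> getPrimitiveTypeReference(String name) {
        switch (name) {
            case "boolean": return TYPES.BOOLEAN_PRIMITIVE;
            case "byte":    return TYPES.BYTE_PRIMITIVE;
            case "short":   return TYPES.SHORT_PRIMITIVE;
            case "int":     return TYPES.INTEGER_PRIMITIVE;
            case "long":    return TYPES.LONG_PRIMITIVE;
            case "char":    return TYPES.CHARACTER_PRIMITIVE;
            case "float":   return TYPES.FLOAT_PRIMITIVE;
            case "double":  return TYPES.DOUBLE_PRIMITIVE;
            default:
                throw new IllegalArgumentException(
                        String.format("'%s' is not a primitive type!", name));
        }
    }

    // Collapses a primitive or wrapper type reference onto the primitive type.
    public static CtTypeReference<?> getPrimitiveTypeReference(CtTypeReference<?> typeRef) {
        CtTypeReference<?> unboxed = typeRef.unbox();
        if (!unboxed.isPrimitive()) {
            throw new IllegalArgumentException("The given type is not a primitive or wrapper type!");
        }
        return unboxed;
    }
}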
<gh_stars>0 /* * Copyright 2014 OpenMarket Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package im.vector.activity; import android.annotation.SuppressLint; import android.content.ClipData; import android.content.Intent; import android.content.SharedPreferences; import android.content.pm.ActivityInfo; import android.database.Cursor; import android.graphics.Bitmap; import android.graphics.BitmapFactory; import android.graphics.Color; import android.graphics.ImageFormat; import android.graphics.SurfaceTexture; import android.media.MediaActionSound; import android.net.Uri; import android.os.Build; import android.os.Bundle; import im.vector.R; import im.vector.VectorApp; import im.vector.view.RecentMediaLayout; import android.hardware.Camera; import android.os.HandlerThread; import android.preference.PreferenceManager; import android.provider.MediaStore; import android.text.TextUtils; import android.util.DisplayMetrics; import android.util.Log; import android.view.Surface; import android.view.TextureView; import android.view.View; import android.view.ViewGroup; import android.widget.ImageView; import android.widget.RelativeLayout; import android.widget.TableLayout; import android.widget.TableRow; import android.widget.Toast; import org.matrix.androidsdk.util.ImageUtils; import java.io.ByteArrayInputStream; import java.io.File; import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; import java.util.Date; import java.util.List; /** * VectorMediasPickerActivity is used to take a photo or to send an old one. 
*/ public class VectorMediasPickerActivity extends MXCActionBarActivity implements TextureView.SurfaceTextureListener { // medias folder private static final int REQUEST_MEDIAS = 54; private static final String LOG_TAG = "VectorMedPicker"; // public keys public static final String EXTRA_SINGLE_IMAGE_MODE = "EXTRA_SINGLE_IMAGE_MODE"; // internal keys private static final String KEY_EXTRA_IS_TAKEN_IMAGE_DISPLAYED = "IS_TAKEN_IMAGE_DISPLAYED"; private static final String KEY_EXTRA_TAKEN_IMAGE_ORIGIN = "TAKEN_IMAGE_ORIGIN"; private static final String KEY_EXTRA_TAKEN_IMAGE_GALLERY_URI = "TAKEN_IMAGE_GALLERY_URI"; private static final String KEY_EXTRA_TAKEN_IMAGE_CAMERA_URL = "TAKEN_IMAGE_CAMERA_URL"; private static final String KEY_EXTRA_CAMERA_SIDE = "TAKEN_IMAGE_CAMERA_SIDE"; private static final String KEY_PREFERENCE_CAMERA_IMAGE_NAME = "KEY_PREFERENCE_CAMERA_IMAGE_NAME"; private final int IMAGE_ORIGIN_CAMERA = 1; private final int IMAGE_ORIGIN_GALLERY = 2; private final boolean UI_SHOW_TAKEN_IMAGE = true; private final boolean UI_SHOW_CAMERA_PREVIEW = false; private final int GALLERY_COLUMN_COUNT = 4; private final int GALLERY_RAW_COUNT = 3; private final double SURFACE_VIEW_HEIGHT_RATIO = 0.95; private final int GALLERY_TABLE_ITEM_SIZE = (GALLERY_COLUMN_COUNT * GALLERY_RAW_COUNT); private final int JPEG_QUALITY_MAX = 100; private final String MIME_TYPE_IMAGE_GIF = "image/gif"; private final String MIME_TYPE_IMAGE = "image"; /** * define a recent media */ private class RecentMedia { public Uri mFileUri; public long mCreationTime; public Bitmap mThumbnail; public Boolean mIsVideo; public String mMimeType = ""; } // recents medias list private final ArrayList<RecentMedia> mMediaStoreImagesList = new ArrayList<>(); private final ArrayList<RecentMedia> mSelectedGalleryItemsList = new ArrayList<>(); // camera object private Camera mCamera; private int mCameraId; private int mCameraOrientation = 0; // graphical items private ImageView mSwitchCameraImageView; // camera preview and gallery selection layout private View mPreviewScrollView; private ImageView mTakeImageView; private TableLayout mGalleryTableLayout; private RelativeLayout mCameraPreviewLayout; private TextureView mCameraTextureView; private SurfaceTexture mSurfaceTexture; private View mImagePreviewLayout; private ImageView mImagePreviewImageView; private RelativeLayout mPreviewAndGalleryLayout; private int mGalleryImageCount; private int mScreenWidth; // lifecycle management variable private boolean mIsTakenImageDisplayed; private int mTakenImageOrigin; private String mShootedPicturePath; private int mCameraPreviewHeight = 0; /** * The recent requests are performed in a dedicated thread */ private HandlerThread mHandlerThread; private android.os.Handler mFileHandler; @Override public void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_vector_medias_picker); if (CommonActivityUtils.shouldRestartApp(this)) { Log.e(LOG_TAG, "Restart the application."); CommonActivityUtils.restartApp(this); return; } mCameraId = Camera.CameraInfo.CAMERA_FACING_BACK; // camera preview mPreviewScrollView = findViewById(R.id.medias_picker_scrollView); mSwitchCameraImageView = (ImageView) findViewById(R.id.medias_picker_switch_camera); mCameraTextureView = (TextureView) findViewById(R.id.medias_picker_texture_view); mCameraTextureView.setSurfaceTextureListener(this); // image preview mImagePreviewLayout = findViewById(R.id.medias_picker_preview); mImagePreviewImageView = (ImageView) 
findViewById(R.id.medias_picker_preview_image_view); mTakeImageView = (ImageView) findViewById(R.id.medias_picker_camera_button); mGalleryTableLayout = (TableLayout)findViewById(R.id.gallery_table_layout); // mSwitchCameraImageView.setVisibility((Camera.getNumberOfCameras() > 1) ? View.VISIBLE : View.GONE); // click action mSwitchCameraImageView.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { VectorMediasPickerActivity.this.onSwitchCamera(); } }); mTakeImageView.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { VectorMediasPickerActivity.this.onClickTakeImage(); } }); findViewById(R.id.medias_picker_cancel_take_picture_imageview).setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { VectorMediasPickerActivity.this.cancelTakeImage(); } }); findViewById(R.id.medias_picker_attach_take_picture_imageview).setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { VectorMediasPickerActivity.this.attachImageFrom(mTakenImageOrigin); } }); initCameraLayout(); // setup separate thread for image gallery update mHandlerThread = new HandlerThread("VectorMediasPickerActivityThread"); mHandlerThread.start(); mFileHandler = new android.os.Handler(mHandlerThread.getLooper()); if(!restoreInstanceState(savedInstanceState)){ // default UI: if a taken image is not in preview, then display: live camera preview + "take picture"/switch/exit buttons updateUiConfiguration(UI_SHOW_CAMERA_PREVIEW, IMAGE_ORIGIN_CAMERA); } // Force screen orientation be managed by the sensor in case user's setting turned off // sensor-based rotation setRequestedOrientation(ActivityInfo.SCREEN_ORIENTATION_SENSOR); } /** * Init the camera layout to make the surface texture + the gallery layout, both * enough large to enable scrolling. */ private void initCameraLayout() { DisplayMetrics metrics = new DisplayMetrics(); getWindowManager().getDefaultDisplay().getMetrics(metrics); int screenHeight = metrics.heightPixels; mScreenWidth = metrics.widthPixels; mCameraPreviewHeight = (int)(screenHeight * SURFACE_VIEW_HEIGHT_RATIO); // set the height of the relative layout containing the texture view mCameraPreviewLayout = (RelativeLayout)findViewById(R.id.medias_picker_camera_preview_layout); ViewGroup.LayoutParams previewLayoutParams = mCameraPreviewLayout.getLayoutParams(); previewLayoutParams.height = mCameraPreviewHeight; mCameraPreviewLayout.setLayoutParams(previewLayoutParams); // set the height of the layout including the texture view and the gallery (total sum > screen height to allow scrolling) mPreviewAndGalleryLayout = (RelativeLayout)findViewById(R.id.medias_picker_preview_gallery_layout); computePreviewAndGalleryHeight(); } /** * Compute the height of the view containing the texture and the table layout. * This height is the sum of mCameraPreviewHeight + gallery height. * The gallery height depends of the number of the gallery rows {@link #getGalleryRowsCount()}). 
*/ private void computePreviewAndGalleryHeight() { int galleryRowsCount = getGalleryRowsCount(); if(null != mPreviewAndGalleryLayout) { ViewGroup.LayoutParams previewAndGalleryLayoutParams = mPreviewAndGalleryLayout.getLayoutParams(); int galleryHeight = (galleryRowsCount * mScreenWidth / GALLERY_COLUMN_COUNT); previewAndGalleryLayoutParams.height = mCameraPreviewHeight + galleryHeight; mPreviewAndGalleryLayout.setLayoutParams(previewAndGalleryLayoutParams); } else Log.w(LOG_TAG, "## computePreviewAndGalleryHeight(): GalleryTable height not set"); } /** * Exit activity handler. * @param aView view */ public void onExitButton(@SuppressWarnings("UnusedParameters") View aView) { finish(); } @Override protected void onDestroy() { super.onDestroy(); if (null != mHandlerThread) { mHandlerThread.quit(); mHandlerThread = null; } } @Override protected void onPause() { super.onPause(); // cancel the camera use // to avoid locking it if (null != mCamera) { mCamera.stopPreview(); } } @Override protected void onResume() { super.onResume(); // update the gallery height, to follow // the content of the device gallery computePreviewAndGalleryHeight(); // update gallery content refreshRecentsMediasList(); startCameraPreview(); } @Override protected void onSaveInstanceState(Bundle outState) { super.onSaveInstanceState(outState); // save camera UI configuration outState.putBoolean(KEY_EXTRA_IS_TAKEN_IMAGE_DISPLAYED, mIsTakenImageDisplayed); outState.putInt(KEY_EXTRA_TAKEN_IMAGE_ORIGIN, mTakenImageOrigin); outState.putInt(KEY_EXTRA_CAMERA_SIDE, mCameraId); // save image preview that may be currently displayed: // -camera flow outState.putString(KEY_EXTRA_TAKEN_IMAGE_CAMERA_URL, mShootedPicturePath); // -gallery flow Uri uriImage = (Uri) mImagePreviewImageView.getTag(); outState.putParcelable(KEY_EXTRA_TAKEN_IMAGE_GALLERY_URI, uriImage); } private boolean restoreInstanceState(Bundle savedInstanceState) { boolean isRestoredInstance = false; if(null != savedInstanceState){ isRestoredInstance = true; mIsTakenImageDisplayed = savedInstanceState.getBoolean(KEY_EXTRA_IS_TAKEN_IMAGE_DISPLAYED); mShootedPicturePath = savedInstanceState.getString(KEY_EXTRA_TAKEN_IMAGE_CAMERA_URL); mTakenImageOrigin = savedInstanceState.getInt(KEY_EXTRA_TAKEN_IMAGE_ORIGIN); // restore gallery image preview (the image can be saved from the preview even after rotation) Uri uriImage = savedInstanceState.getParcelable(KEY_EXTRA_TAKEN_IMAGE_GALLERY_URI); mImagePreviewImageView.setTag(uriImage); // display a preview image? if (mIsTakenImageDisplayed) { Bitmap savedBitmap = VectorApp.getSavedPickerImagePreview(); if (null != savedBitmap) { // image preview from camera only mImagePreviewImageView.setImageBitmap(savedBitmap); } else { // image preview from gallery or camera (mShootedPicturePath) displayImagePreview(mShootedPicturePath, uriImage, mTakenImageOrigin); } } // restore UI display updateUiConfiguration(mIsTakenImageDisplayed, mTakenImageOrigin); // general data to be restored mCameraId = savedInstanceState.getInt(KEY_EXTRA_CAMERA_SIDE); } return isRestoredInstance; } /** * Result handler associated to {@link #openFileExplorer()} request. * This method returns the selected image to the calling activity. 
* * @param requestCode request ID * @param resultCode operation status * @param data data passed from the called activity */ @SuppressLint("NewApi") @Override protected void onActivityResult(int requestCode, int resultCode, final Intent data) { super.onActivityResult(requestCode, resultCode, data); if (resultCode == RESULT_OK) { if (requestCode == REQUEST_MEDIAS) { // provide the Uri Intent intent = new Intent(); intent.setData(data.getData()); if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN_MR2) { intent.setClipData(data.getClipData()); } // clean footprint in App VectorApp.setSavedCameraImagePreview(null); //intent.putExtras(conData); setResult(RESULT_OK, intent); finish(); } } } /** * Populate mMediaStoreImagesList with the images retrieved from the MediaStore. * Max number of retrieved images is set to GALLERY_TABLE_ITEM_SIZE. */ private void addImagesThumbnails() { final String[] projection = {MediaStore.Images.ImageColumns._ID, MediaStore.Images.ImageColumns.DATE_TAKEN, MediaStore.Images.ImageColumns.MIME_TYPE}; Cursor thumbnailsCursor = null; try { thumbnailsCursor = this.getContentResolver().query(MediaStore.Images.Media.EXTERNAL_CONTENT_URI, projection, // Which columns to return null, // Return all image files null, MediaStore.Images.ImageColumns.DATE_TAKEN + " DESC LIMIT "+ GALLERY_TABLE_ITEM_SIZE); } catch (Exception e) { Log.e(LOG_TAG, "addImagesThumbnails" + e.getLocalizedMessage()); } if (null != thumbnailsCursor) { int timeIndex = thumbnailsCursor.getColumnIndex(MediaStore.Images.ImageColumns.DATE_TAKEN); int idIndex = thumbnailsCursor.getColumnIndex(MediaStore.Images.ImageColumns._ID); int mimeTypeIndex = thumbnailsCursor.getColumnIndex(MediaStore.Images.ImageColumns.MIME_TYPE); if (thumbnailsCursor.moveToFirst()) { do { try { RecentMedia recentMedia = new RecentMedia(); recentMedia.mIsVideo = false; String id = thumbnailsCursor.getString(idIndex); String dateAsString = thumbnailsCursor.getString(timeIndex); recentMedia.mMimeType = thumbnailsCursor.getString(mimeTypeIndex); recentMedia.mCreationTime = Long.parseLong(dateAsString); recentMedia.mThumbnail = MediaStore.Images.Thumbnails.getThumbnail(this.getContentResolver(), Long.parseLong(id), MediaStore.Images.Thumbnails.MINI_KIND, null); recentMedia.mFileUri = Uri.parse(MediaStore.Images.Media.EXTERNAL_CONTENT_URI.toString() + "/" + id); int rotationAngle = ImageUtils.getRotationAngleForBitmap(VectorMediasPickerActivity.this, recentMedia.mFileUri); if (0 != rotationAngle) { android.graphics.Matrix bitmapMatrix = new android.graphics.Matrix(); bitmapMatrix.postRotate(rotationAngle); recentMedia.mThumbnail = Bitmap.createBitmap(recentMedia.mThumbnail, 0, 0, recentMedia.mThumbnail.getWidth(), recentMedia.mThumbnail.getHeight(), bitmapMatrix, false); } // Note: getThumbnailUriFromMediaStorage() can return null for non jpeg images (ie png). 
// We could then use the bitmap(mThumbnail)) that is never null, but with no rotation applied mMediaStoreImagesList.add(recentMedia); } catch (Exception e) { Log.e(LOG_TAG, "## addImagesThumbnails(): Msg=" + e.getMessage()); } } while (thumbnailsCursor.moveToNext()); } thumbnailsCursor.close(); } Log.d(LOG_TAG, "## addImagesThumbnails(): Added count=" + mMediaStoreImagesList.size()); } private int getMediaStoreImageCount(){ int retValue = 0; Cursor thumbnailsCursor = null; try { thumbnailsCursor = this.getContentResolver().query(MediaStore.Images.Media.EXTERNAL_CONTENT_URI, null, // no projection null, null, MediaStore.Images.ImageColumns.DATE_TAKEN + " DESC LIMIT "+ GALLERY_TABLE_ITEM_SIZE); } catch (Exception e) { Log.e(LOG_TAG, "## getMediaStoreImageCount() Exception Msg=" + e.getLocalizedMessage()); } if (null != thumbnailsCursor) { retValue = thumbnailsCursor.getCount(); thumbnailsCursor.close(); } return retValue; } private int getGalleryRowsCount() { int rowsCountRetVal; mGalleryImageCount = getMediaStoreImageCount(); if((0==mGalleryImageCount) || (0 != (mGalleryImageCount%GALLERY_COLUMN_COUNT))) { rowsCountRetVal = (mGalleryImageCount/GALLERY_COLUMN_COUNT) +1; } else { rowsCountRetVal = mGalleryImageCount/GALLERY_COLUMN_COUNT; mGalleryImageCount--; // save one cell for the folder icon } return rowsCountRetVal; } /** * Populate the gallery view with the image/video contents. */ private void refreshRecentsMediasList() { // start the pregress bar and disable the take button final RelativeLayout progressBar = (RelativeLayout)(findViewById(R.id.medias_preview_progress_bar_layout)); progressBar.setVisibility(View.VISIBLE); mTakeImageView.setEnabled(false); mTakeImageView.setAlpha(CommonActivityUtils.UTILS_OPACITY_HALF); mMediaStoreImagesList.clear(); // run away from the UI thread mFileHandler.post(new Runnable() { @Override public void run() { // populate the image thumbnails from multimedia store addImagesThumbnails(); Collections.sort(mMediaStoreImagesList, new Comparator<RecentMedia>() { @Override public int compare(RecentMedia r1, RecentMedia r2) { long t1 = r1.mCreationTime; long t2 = r2.mCreationTime; // sort from the most recent return -(t1 < t2 ? -1 : (t1 == t2 ? 0 : 1)); } }); // update the UI part VectorMediasPickerActivity.this.runOnUiThread(new Runnable() { @Override public void run() { buildGalleryImageTableLayout(); progressBar.setVisibility(View.GONE); mTakeImageView.setEnabled(true); mTakeImageView.setAlpha(CommonActivityUtils.UTILS_OPACITY_NONE); } }); } }); } /** * Build the image gallery widget programmatically. 
*/ private void buildGalleryImageTableLayout() { final int CELL_MARGIN = 2; TableRow tableRow = null; RecentMediaLayout recentImageView; int tableLayoutWidth; int cellWidth; int cellHeight; int itemIndex; ImageView.ScaleType scaleType; TableRow.LayoutParams rawLayoutParams; TableLayout.LayoutParams tableLayoutParams = new TableLayout.LayoutParams(); if(null != mGalleryTableLayout) { mGalleryTableLayout.removeAllViews(); mGalleryTableLayout.setBackgroundColor(Color.WHITE); DisplayMetrics metrics = new DisplayMetrics(); getWindowManager().getDefaultDisplay().getMetrics(metrics); tableLayoutWidth = metrics.widthPixels; // raw layout configuration cellWidth = (tableLayoutWidth -(GALLERY_COLUMN_COUNT * CELL_MARGIN)) / GALLERY_COLUMN_COUNT; cellHeight = cellWidth; if(0 == tableLayoutWidth) { // fall back scaleType = ImageView.ScaleType.FIT_XY; rawLayoutParams = new TableRow.LayoutParams(ViewGroup.LayoutParams.MATCH_PARENT, ViewGroup.LayoutParams.MATCH_PARENT); } else { scaleType = ImageView.ScaleType.FIT_CENTER; rawLayoutParams = new TableRow.LayoutParams(cellWidth, cellHeight); } rawLayoutParams.setMargins(CELL_MARGIN, 0, CELL_MARGIN, 0); tableLayoutParams.setMargins(CELL_MARGIN, CELL_MARGIN, CELL_MARGIN, CELL_MARGIN); RecentMedia recentMedia; // loop to produce full raws filled in, with an icon folder in last cell for(itemIndex=0; itemIndex<mGalleryImageCount; itemIndex++) { try { recentMedia = mMediaStoreImagesList.get(itemIndex); } catch (IndexOutOfBoundsException e) { recentMedia = null; } // detect raw is complete if (0 == (itemIndex % GALLERY_COLUMN_COUNT)) { if (null != tableRow) { mGalleryTableLayout.addView(tableRow, tableLayoutParams); } tableRow = new TableRow(this); } // build the content layout for each cell if(null != recentMedia) { recentImageView = new RecentMediaLayout(this); if (null != recentMedia.mThumbnail) { recentImageView.setThumbnail(recentMedia.mThumbnail); } else { recentImageView.setThumbnailByUri(recentMedia.mFileUri); } recentImageView.setBackgroundColor(Color.BLACK); recentImageView.setThumbnailScaleType(scaleType); final RecentMedia finalRecentMedia = recentMedia; recentImageView.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { onClickGalleryImage(finalRecentMedia); } }); // set image logo: gif, image or video recentImageView.enableGifLogoImage(MIME_TYPE_IMAGE_GIF.equals(recentMedia.mMimeType)); recentImageView.enableMediaTypeLogoImage(!MIME_TYPE_IMAGE_GIF.equals(recentMedia.mMimeType)); recentImageView.setIsVideo(recentMedia.mMimeType.contains(MIME_TYPE_IMAGE)); if(null != tableRow) tableRow.addView(recentImageView, rawLayoutParams); } } // add the icon folder in last cell recentImageView = new RecentMediaLayout(this); recentImageView.setThumbnailScaleType(scaleType); recentImageView.setThumbnailByResource(R.drawable.ic_material_folder_green_vector); recentImageView.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { openFileExplorer(); } }); if(0 == itemIndex) { tableRow = new TableRow(this); } if(null != tableRow) tableRow.addView(recentImageView, rawLayoutParams); // do not forget to add last row if (null != tableRow) mGalleryTableLayout.addView(tableRow, tableLayoutParams); } else { Log.w(LOG_TAG, "## buildGalleryImageTableLayout(): failure - TableLayout widget missing"); } } private void onClickGalleryImage(final RecentMedia aMediaItem){ mCamera.stopPreview(); // add the selected image to be returned by the activity mSelectedGalleryItemsList.add(aMediaItem); // display the image as 
preview if (null != aMediaItem.mThumbnail) { updateUiConfiguration(UI_SHOW_TAKEN_IMAGE, IMAGE_ORIGIN_GALLERY); mImagePreviewImageView.setImageBitmap(aMediaItem.mThumbnail); // save bitmap to speed up UI restore (life cycle) VectorApp.setSavedCameraImagePreview(aMediaItem.mThumbnail); } else if(null != aMediaItem.mFileUri) { // fall back in case bitmap is not available (unlikely..) displayImagePreview(null, aMediaItem.mFileUri, IMAGE_ORIGIN_GALLERY); } else { Log.e(LOG_TAG, "## onClickGalleryImage(): no image to display"); } // save the uri to be accessible for life cycle management mImagePreviewImageView.setTag(aMediaItem.mFileUri); } /** * Take a photo */ private void takePhoto() { Log.d(LOG_TAG, "## takePhoto"); try { mCamera.takePicture(null, null, new Camera.PictureCallback() { @Override public void onPictureTaken(byte[] data, Camera camera) { Log.d(LOG_TAG, "## onPictureTaken(): success"); ByteArrayInputStream inputStream = new ByteArrayInputStream(data); File dstFile; String fileName = getSavedImageName(); // remove any previously saved image if (!TextUtils.isEmpty(fileName)) { dstFile = new File(getCacheDir().getAbsolutePath(), fileName); if (dstFile.exists()) { dstFile.delete(); } } // get new name fileName = buildNewImageName(); dstFile = new File(getCacheDir().getAbsolutePath(), fileName); // Copy source file to destination FileOutputStream outputStream = null; try { dstFile.createNewFile(); outputStream = new FileOutputStream(dstFile); byte[] buffer = new byte[1024 * 10]; int len; while ((len = inputStream.read(buffer)) != -1) { outputStream.write(buffer, 0, len); } mShootedPicturePath = dstFile.getAbsolutePath(); displayImagePreview(mShootedPicturePath, null, IMAGE_ORIGIN_CAMERA); // force to stop preview: // some devices do not stop preview after the picture was taken (ie. 
G6 edge) mCamera.stopPreview(); Log.d(LOG_TAG, "onPictureTaken processed"); } catch (Exception e) { Toast.makeText(VectorMediasPickerActivity.this, "Exception onPictureTaken(): " + e.getLocalizedMessage(), Toast.LENGTH_SHORT).show(); } finally { // Close resources try { inputStream.close(); if (outputStream != null) { outputStream.close(); } } catch (Exception e) { Log.e(LOG_TAG, "## onPictureTaken(): EXCEPTION Msg=" + e.getMessage()); } } } }); } catch (Exception e) { Log.e(LOG_TAG, "## takePicture(): EXCEPTION Msg=" + e.getMessage()); } } /** * Take a picture of the current preview */ private void onClickTakeImage() { Log.d(LOG_TAG, "onClickTakeImage"); if (null != mCamera) { try { List<String> supportedFocusModes = null; if (null != mCamera.getParameters()) { supportedFocusModes = mCamera.getParameters().getSupportedFocusModes(); } Log.d(LOG_TAG, "onClickTakeImage : supported focus modes " + supportedFocusModes); if ((null != supportedFocusModes) && (supportedFocusModes.indexOf(Camera.Parameters.FOCUS_MODE_AUTO) >= 0)) { Log.d(LOG_TAG, "onClickTakeImage : autofocus starts"); mCamera.autoFocus(new Camera.AutoFocusCallback() { public void onAutoFocus(boolean success, Camera camera) { if (!success) { Log.e(LOG_TAG, "## autoFocus(): fails"); } else { Log.d(LOG_TAG, "## autoFocus(): succeeds"); } playShutterSound(); // take a photo event if the autofocus fails takePhoto(); } }); } else { Log.d(LOG_TAG, "onClickTakeImage : no autofocus : take photo"); playShutterSound(); takePhoto(); } } catch (Exception e) { Log.e(LOG_TAG, "## autoFocus(): EXCEPTION Msg=" + e.getMessage()); // take a photo event if the autofocus fails playShutterSound(); takePhoto(); } } } private String buildNewImageName(){ String nameRetValue; // build name based on the date //String fileSufixTime = DateFormat.getDateTimeInstance().format(new Date()); SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd_hhmmss") ; String fileSufixTime = dateFormat.format(new Date()); //fileSufixTime += "_"+ (SystemClock.uptimeMillis()/1000); nameRetValue = "VectorImage_"+fileSufixTime+".jpg"; // save new name in preference SharedPreferences preferences = PreferenceManager.getDefaultSharedPreferences(this); SharedPreferences.Editor editor = preferences.edit(); editor.putString(KEY_PREFERENCE_CAMERA_IMAGE_NAME, nameRetValue); editor.commit(); return nameRetValue; } private String getSavedImageName(){ SharedPreferences preferences = PreferenceManager.getDefaultSharedPreferences(this); return preferences.getString(KEY_PREFERENCE_CAMERA_IMAGE_NAME, null); } /** * Create a thumbnail bitmap from an image URL if there is some exif metadata which implies to rotate * the image. This method is used to process the image taken by the from the camera. * @param aImageUrl the image url * @return a thumbnail if the exif metadata implies to rotate the image. 
*/ private Bitmap createPhotoThumbnail(final String aImageUrl) { Bitmap bitmapRetValue = null; final int MAX_SIZE = 1024, SAMPLE_SIZE = 0, QUALITY = 100; // sanity check if (null != aImageUrl) { Uri imageUri = Uri.fromFile(new File(aImageUrl)); int rotationAngle = ImageUtils.getRotationAngleForBitmap(VectorMediasPickerActivity.this, imageUri); // the exif metadata implies a rotation if (0 != rotationAngle) { // create a thumbnail BitmapFactory.Options options = new BitmapFactory.Options(); options.inPreferredConfig = Bitmap.Config.ARGB_8888; options.outWidth = -1; options.outHeight = -1; try { final String filename = imageUri.getPath(); FileInputStream imageStream = new FileInputStream(new File(filename)); // create a thumbnail InputStream stream = ImageUtils.resizeImage(imageStream, MAX_SIZE, SAMPLE_SIZE, QUALITY); imageStream.close(); Bitmap bitmap = BitmapFactory.decodeStream(stream, null, options); // apply a rotation android.graphics.Matrix bitmapMatrix = new android.graphics.Matrix(); bitmapMatrix.postRotate(rotationAngle); bitmapRetValue = Bitmap.createBitmap(bitmap, 0, 0, bitmap.getWidth(), bitmap.getHeight(), bitmapMatrix, false); System.gc(); } catch (OutOfMemoryError e) { Log.e(LOG_TAG, "## createPhotoThumbnail : out of memory"); } catch (Exception e) { Log.e(LOG_TAG, "## createPhotoThumbnail() Exception Msg=" + e.getMessage()); } } } return bitmapRetValue; } /** * Display the image preview. * * @param aCameraImageUrl image from camera * @param aGalleryImageUri image ref as an Uri * @param aOrigin CAMERA or GALLERY */ private void displayImagePreview(final String aCameraImageUrl, final Uri aGalleryImageUri, final int aOrigin){ final RelativeLayout progressBar = (RelativeLayout)(findViewById(R.id.medias_preview_progress_bar_layout)); progressBar.setVisibility(View.VISIBLE); mTakeImageView.setEnabled(false); Bitmap newBitmap = null; Uri defaultUri; if (IMAGE_ORIGIN_CAMERA == aOrigin) { newBitmap = mCameraTextureView.getBitmap(); if (null == newBitmap) { newBitmap = createPhotoThumbnail(aCameraImageUrl); } defaultUri = Uri.fromFile(new File(aCameraImageUrl)); } else { // in gallery defaultUri = aGalleryImageUri; } // save bitmap to speed up UI restore (life cycle) VectorApp.setSavedCameraImagePreview(newBitmap); // update the UI part if (null != newBitmap) {// from camera mImagePreviewImageView.setImageBitmap(newBitmap); } else { if(null != defaultUri) { mImagePreviewImageView.setImageURI(defaultUri); } } mTakeImageView.setEnabled(true); updateUiConfiguration(UI_SHOW_TAKEN_IMAGE, aOrigin); progressBar.setVisibility(View.GONE); } /** * Update the UI according to camera action. Two UIs are displayed: * the camera real time preview (default configuration) or the taken picture. * (the taken picture comes from the camera or from the gallery) * * When the taken image is displayed, only two buttons are displayed: "attach" * the current image or "re take"(cancel) another image with the camera. * We also have to distinguish the origin of the taken image: from the camera * or from the gallery. 
* * @param aIsTakenImageDisplayed true to display the taken image, false to show the camera preview * @param aImageOrigin IMAGE_ORIGIN_CAMERA or IMAGE_ORIGIN_GALLERY */ private void updateUiConfiguration(boolean aIsTakenImageDisplayed, int aImageOrigin){ // save current configuration for lifecyle management mIsTakenImageDisplayed = aIsTakenImageDisplayed; mTakenImageOrigin = aImageOrigin; if (!aIsTakenImageDisplayed) { // clear the selected image from the gallery (if any) mSelectedGalleryItemsList.clear(); } if (aIsTakenImageDisplayed) { mImagePreviewLayout.setVisibility(View.VISIBLE); mPreviewScrollView.setVisibility(View.GONE); } else { // the default UI: hide gallery preview, show the surface view mPreviewScrollView.setVisibility(View.VISIBLE); mImagePreviewLayout.setVisibility(View.GONE); } } /** * Start the camera preview */ private void startCameraPreview() { try { if (null != mCamera) { mCamera.startPreview(); } } catch (Exception ex){ Log.w(LOG_TAG,"## startCameraPreview(): Exception Msg="+ ex.getMessage()); } } /** * Cancel the current image preview, and setup the UI to * start a new image capture. */ private void cancelTakeImage() { mShootedPicturePath = null; mSelectedGalleryItemsList.clear(); VectorApp.setSavedCameraImagePreview(null); startCameraPreview(); // reset UI ot default: "take picture" button screen updateUiConfiguration(UI_SHOW_CAMERA_PREVIEW, IMAGE_ORIGIN_CAMERA); } /** * "attach image" dispatcher. * * @param aImageOrigin camera, otherwise gallery */ private void attachImageFrom(int aImageOrigin) { if(IMAGE_ORIGIN_CAMERA == aImageOrigin){ attachImageFromCamera(); } else if(IMAGE_ORIGIN_GALLERY == aImageOrigin){ attachImageFrommGallery(); } else { Log.w(LOG_TAG,"## attachImageFrom(): unknown image origin"); } } /** * Returns the thumbnail path of shot image. * @param picturePath the image path * @return the thumbnail image path. */ public static String getThumbnailPath(String picturePath) { if (!TextUtils.isEmpty(picturePath) && picturePath.endsWith(".jpg")) { return picturePath.replace(".jpg", "_thumb.jpg"); } return null; } /** * Return the taken image from the camera to the calling activity. * This method returns to the calling activity. 
*/ private void attachImageFromCamera() { try { // sanity check if (null != mShootedPicturePath) { try { Bitmap previewBitmap = VectorApp.getSavedPickerImagePreview(); String thumbnailPath = getThumbnailPath(mShootedPicturePath); File file = new File(thumbnailPath); FileOutputStream outStream = new FileOutputStream(file); previewBitmap.compress(Bitmap.CompressFormat.JPEG, 50, outStream); outStream.flush(); outStream.close(); } catch (Exception e) { Log.e(LOG_TAG, "attachImageFromCamera fails to create thumbnail file"); } Uri uri = Uri.fromFile(new File(mShootedPicturePath)); // provide the Uri Bundle conData = new Bundle(); Intent intent = new Intent(); intent.setData(uri); intent.putExtras(conData); setResult(RESULT_OK, intent); } } catch (Exception e) { setResult(RESULT_CANCELED, null); } finally { // clean footprint in App VectorApp.setSavedCameraImagePreview(null); finish(); } } private void openFileExplorer() { try { Intent fileIntent = new Intent(Intent.ACTION_PICK); if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN_MR2) { fileIntent.putExtra(Intent.EXTRA_ALLOW_MULTIPLE, false); } // no mime type filter to allow any kind of content fileIntent.setType(CommonActivityUtils.MIME_TYPE_IMAGE_ALL); startActivityForResult(fileIntent, REQUEST_MEDIAS); } catch(Exception e) { Toast.makeText(VectorMediasPickerActivity.this, e.getLocalizedMessage(), Toast.LENGTH_LONG).show(); } } /** * Return the taken image from the gallery to the calling activity. * This method returns to the calling activity. */ @SuppressLint("NewApi") private void attachImageFrommGallery() { Bundle conData = new Bundle(); Intent intent = new Intent(); if ((mSelectedGalleryItemsList.size() == 1) || (Build.VERSION.SDK_INT < Build.VERSION_CODES.JELLY_BEAN_MR2)) { // provide the Uri intent.setData(mSelectedGalleryItemsList.get(0).mFileUri); } else if (mSelectedGalleryItemsList.size() > 0) { ClipData.Item firstUri = new ClipData.Item(null, null, null, mSelectedGalleryItemsList.get(0).mFileUri); String[] mimeType = { "*/*" }; ClipData clipData = new ClipData("", mimeType, firstUri); for(int index = 1; index < mSelectedGalleryItemsList.size(); index++) { ClipData.Item item = new ClipData.Item(null, null, null, mSelectedGalleryItemsList.get(index).mFileUri); clipData.addItem(item); } intent.setClipData(clipData); } else { // attach after a screen rotation, the file uri must was saved in the tag Uri uriSavedFromLifeCycle = (Uri) mImagePreviewImageView.getTag(); if (null != uriSavedFromLifeCycle) { intent.setData(uriSavedFromLifeCycle); } } intent.putExtras(conData); setResult(RESULT_OK, intent); // clean footprint in App VectorApp.setSavedCameraImagePreview(null); finish(); } /** * Switch camera (front <-> back) */ private void onSwitchCamera() { // can only switch if the device has more than two camera if (Camera.getNumberOfCameras() >= 2) { // stop camera if (null != mCameraTextureView) { mCamera.stopPreview(); } mCamera.release(); if (mCameraId == Camera.CameraInfo.CAMERA_FACING_BACK) { mCameraId = Camera.CameraInfo.CAMERA_FACING_FRONT; } else { mCameraId = Camera.CameraInfo.CAMERA_FACING_BACK; } try { mCamera = Camera.open(mCameraId); // set the full quality picture, rotation angle initCameraSettings(); try { mCamera.setPreviewTexture(mSurfaceTexture); } catch (IOException e) { Log.e(LOG_TAG, "## onSwitchCamera(): setPreviewTexture EXCEPTION Msg=" + e.getMessage()); } mCamera.startPreview(); } catch (Exception e) { Log.e(LOG_TAG, "## onSwitchCamera(): cannot init the other camera"); // assume that only one camera can be 
used. mSwitchCameraImageView.setVisibility(View.GONE); onSwitchCamera(); } } } /** * Define the camera rotation (preview and recording). */ private void initCameraSettings() { android.hardware.Camera.CameraInfo info = new android.hardware.Camera.CameraInfo(); android.hardware.Camera.getCameraInfo(mCameraId, info); int rotation = this.getWindowManager().getDefaultDisplay().getRotation(); int degrees = 0; switch (rotation) { case Surface.ROTATION_0: degrees = 0; break; // portrait case Surface.ROTATION_90: degrees = 90; break; // landscape case Surface.ROTATION_180: degrees = 180; break; case Surface.ROTATION_270: degrees = 270; break; // landscape } int previewRotation; int imageRotation; if (info.facing == Camera.CameraInfo.CAMERA_FACING_FRONT) { imageRotation = previewRotation = (info.orientation + degrees) % 360; previewRotation = (360 - previewRotation) % 360; // compensate the mirror } else { // back-facing imageRotation = previewRotation = (info.orientation - degrees + 360) % 360; } mCameraOrientation = previewRotation; mCamera.setDisplayOrientation(previewRotation); Camera.Parameters params = mCamera.getParameters(); // apply the rotation params.setRotation(imageRotation); // set the best quality List<Camera.Size> supportedSizes = params.getSupportedPictureSizes(); if (supportedSizes.size() > 0) { // search the highest image quality // they are not always sorted in the same order (sometimes it is asc sort ..) Camera.Size maxSizePicture = supportedSizes.get(0); long mult = maxSizePicture.width * maxSizePicture.height; for(int i = 1; i < supportedSizes.size(); i++) { Camera.Size curSizePicture = supportedSizes.get(i); long curMult = curSizePicture.width * curSizePicture.height; if (curMult > mult) { mult = curMult; maxSizePicture = curSizePicture; } } // and use it. params.setPictureSize(maxSizePicture.width, maxSizePicture.height); } try { mCamera.setParameters(params); } catch (Exception e) { Log.e(LOG_TAG, "## initCameraSettings(): set size fails EXCEPTION Msg=" + e.getMessage()); } // set auto focus try { params.setFocusMode(Camera.Parameters.FOCUS_MODE_AUTO); mCamera.setParameters(params); } catch (Exception e) { Log.e(LOG_TAG, "## initCameraSettings(): set auto focus fails EXCEPTION Msg=" + e.getMessage()); } // set jpeg quality try { params.setPictureFormat(ImageFormat.JPEG); params.setJpegQuality(JPEG_QUALITY_MAX); mCamera.setParameters(params); } catch (Exception e) { Log.e(LOG_TAG, "## initCameraSettings(): set jpeg quality fails EXCEPTION Msg=" + e.getMessage()); } } private void playShutterSound() { MediaActionSound sound = new MediaActionSound(); sound.play(MediaActionSound.SHUTTER_CLICK); } @Override public void onSurfaceTextureAvailable(SurfaceTexture surface, int width, int height) { try { mCamera = Camera.open(mCameraId); } catch (Exception e) { Log.e(LOG_TAG,"Cannot open the camera " + mCameraId); } // fall back: the camera initialisation failed if (null == mCamera) { // assume that only one camera can be used. mSwitchCameraImageView.setVisibility(View.GONE); try { mCamera = Camera.open((Camera.CameraInfo.CAMERA_FACING_BACK == mCameraId) ? 
Camera.CameraInfo.CAMERA_FACING_FRONT : Camera.CameraInfo.CAMERA_FACING_BACK); } catch (Exception e) { Log.e(LOG_TAG,"Cannot open the camera " + mCameraId); } } // cannot start the cam if (null == mCamera) { Log.w(LOG_TAG,"## onSurfaceTextureAvailable() camera creation failed"); return; } try { mSurfaceTexture = surface; mCamera.setPreviewTexture(surface); initCameraSettings(); Camera.Size previewSize = mCamera.getParameters().getPreviewSize(); // Valid values are 0, 90, 180, and 270 (0 = landscape) if ((mCameraOrientation == 90) || (mCameraOrientation == 270)) { int tmp = previewSize.width; previewSize.width = previewSize.height; previewSize.height = tmp; } // check that the aspect ratio is kept int sourceRatio = previewSize.height * 100 / previewSize.width; int dstRatio = height * 100 / width; // the camera preview size must fit the size provided by the surface texture if (sourceRatio != dstRatio) { int newWidth; int newHeight; newHeight = height; newWidth = (int) (((float) newHeight) * previewSize.width / previewSize.height); if (newWidth > width) { newWidth = width; newHeight = (int) (((float) newWidth) * previewSize.height / previewSize.width); } // apply the size provided by the texture to the texture layout ViewGroup.LayoutParams layout = mCameraTextureView.getLayoutParams(); layout.width = newWidth; layout.height = newHeight; mCameraTextureView.setLayoutParams(layout); if (layout.height < mCameraPreviewHeight) { mCameraPreviewHeight = layout.height; // set the height of the relative layout containing the texture view if(null != mCameraPreviewLayout) { RelativeLayout.LayoutParams previewLayoutParams = (RelativeLayout.LayoutParams)mCameraPreviewLayout.getLayoutParams(); previewLayoutParams.height = mCameraPreviewHeight; mCameraPreviewLayout.setLayoutParams(previewLayoutParams); } // define the gallery height: height of the texture view + height of the gallery (total sum > screen height to allow scrolling) computePreviewAndGalleryHeight(); } } mCamera.startPreview(); } catch (Exception e) { if (null != mCamera) { mCamera.stopPreview(); mCamera.release(); mCamera = null; } } } @Override public void onSurfaceTextureSizeChanged(SurfaceTexture surface, int width, int height) { Log.d(LOG_TAG, "## onSurfaceTextureSizeChanged(): width="+width+" height="+height); } @Override public void onSurfaceTextureUpdated(SurfaceTexture surface) { } @Override public boolean onSurfaceTextureDestroyed(SurfaceTexture surface) { mCamera.stopPreview(); mCamera.release(); mSurfaceTexture = null; mCamera = null; return true; } // ********************************************************************************************* }
Hosted by Sabrina Siddiqui. Produced by Jocelyn Frank. Later in the show, Siddiqui speaks with former ambassador Ronald E Neumann, who served under Bill Clinton and later in Bahrain and Afghanistan under George W Bush.
A Facebook user, Dasarathar, recently uploaded several photos and a video showing what was seemingly a clash between gang members wielding weapons, including machetes and baseball bats. According to him, the incident happened last Friday (Mar 3) near Ulu Tiram, Johor, in Malaysia, and apparently started when members of the infamous Gang 360 clashed with members of a rival gang, reports The Coverage. “When you are in a gang, you have to go through this. It doesn’t matter if the members from an opposing gang are still alive or not. As long as you are in a gang, you and your gang have to live. When you are in a fight or in any kind of trouble, your brothers will all be there for you. My brothers will always be there for me.” Some netizens commented that the footage might be fake and the incident staged, due to the lack of blood. The nature of the incident cannot be ascertained at the moment. Watch the video below and decide for yourself.
from typing import Any

# Sentinel marking "no change"; assumed to be defined at module level in the
# original source.
SAME_MARK = object()


def filter_data(base: Any, updates: Any) -> Any:
    """Return only the parts of `updates` that differ from `base` (SAME_MARK if none)."""
    # A change of type always counts as new data.
    if not isinstance(updates, type(base)):
        return updates
    # Non-dict leaves: unchanged values collapse to SAME_MARK.
    if not isinstance(base, dict):
        if base == updates:
            return SAME_MARK
        return updates
    # Dicts: keep new keys plus common keys whose values changed (recursively).
    new_keys = set(updates.keys()) - set(base.keys())
    common_keys = set(updates.keys()).intersection(set(base.keys()))
    new_data = {key: updates[key] for key in new_keys}
    for key in common_keys:
        value = filter_data(base[key], updates[key])
        if value is SAME_MARK:
            continue
        new_data[key] = value
    if not new_data:
        return SAME_MARK
    return new_data
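# A brief usage sketch (hypothetical data): unchanged leaves collapse to
# SAME_MARK and are dropped, so only changed or newly added keys survive.
if __name__ == "__main__":
    base = {"ui": {"theme": "dark", "zoom": 1.0}, "lang": "en"}
    updates = {"ui": {"theme": "dark", "zoom": 1.5}, "lang": "en", "beta": True}
    print(filter_data(base, updates))  # e.g. {'beta': True, 'ui': {'zoom': 1.5}} (key order may vary)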
A Note on Nondeterminism in Small, Fast Parallel Computers Nondeterministic analogues of the well-known language classes NC and SC, called NNC and NSC respectively, are investigated. NC is the class of languages that can be accepted by small, fast parallel computers; SC is the class of languages that can be recognized by a deterministic Turing machine in polynomial time and polylog tape-head reversals. Adding nondeterminism to SC leaves it in the domain of parallel computation, since NSC ⊆ POLYLOGSPACE. That is, NSC is a subset of the class of languages computable by fast parallel computers. Adding nondeterminism to NC appears to make it much more powerful, since NNC = NP. It is clear that NSC ⊆ NNC, and probable that the containment is proper. Further evidence for this conjecture is provided by showing that NSC is precisely the class of languages recognizable in simultaneous polynomial time and polylog reversals by a nondeterministic Turing machine with a read-only input tape and a single read-write work tape; it is known that NNC is characterized similarly, but by a Turing machine with two read-write tapes.
Many of you will have heard of the Asus Transformer Pad TF300 tablet, and today we can bring you news that Asus has just launched a successor, the Transformer Pad TF300TL. The specs and features of the latest Transformer Pad are pretty much the same in most respects, but the new TF300TL does have 4G LTE connectivity, so it should be warmly welcomed. The Transformer Pad TF300 had much to recommend it, and so the TF300TL with high-speed connectivity should also win many fans, as it sports an Nvidia Tegra 3 quad-core processor and 12-core GeForce graphics, which gives it a decent amount of oomph power-wise. The Transformer Pad TF300 also boasts a 10.1-inch WXGA display capable of playing 1080p HD video and comes in 16 or 32GB variants, while the battery life should give users around 9 hours of use, which sounds pretty good considering it's an LTE-equipped tablet. Camera-wise, you're looking at an 8-megapixel rear camera with a 5-element lens and 1080p video capture, and there's also a 1.2-megapixel front-facer, as reported by Ubergizmo. The Transformer Pad TF300TL is launched with AT&T data plans, and the base 16GB model is $499, which is around $120 more than the Transformer Pad TF300. However, Ubergizmo also points out that price and availability may be different across other regions. There's also a useful mobile dock that costs $149 and will up the battery life of the tablet by around 3 hours. If you're a serious computer user, the LTE-equipped Transformer Pad TF300TL could be a really good choice for you, and it comes just in time for the holiday season. What are your thoughts on this latest Asus tablet? Could the addition of LTE connectivity tempt you despite the price of $499? We're interested to hear from you, so let us have your comments on the TF300TL tablet.
/** * Get the definition for this property. * * @return the cached property definition ID; never null * @throws ItemNotFoundException if the node that contains this property doesn't exist anymore * @throws ConstraintViolationException if no valid property definition could be found * @throws InvalidItemStateException if the node has been removed in this session's transient state */ final JcrPropertyDefinition propertyDefinition() throws ItemNotFoundException, ConstraintViolationException, InvalidItemStateException { CachedDefinition defn = cachedDefn; NodeTypes nodeTypes = session.nodeTypes(); if (defn == null || nodeTypes.getVersion() > defn.nodeTypesVersion) { Name primaryType = node.getPrimaryTypeName(); Set<Name> mixinTypes = node.getMixinTypeNames(); JcrPropertyDefinition propDefn = node.propertyDefinitionFor(property(), primaryType, mixinTypes, nodeTypes); PropertyDefinitionId id = propDefn.getId(); setPropertyDefinitionId(id, nodeTypes.getVersion()); return propDefn; } return nodeTypes.getPropertyDefinition(defn.propDefnId); }
Real-World Switching to Riociguat: Management and Practicalities in Patients with PAH and CTEPH Purpose A proportion of patients with pulmonary arterial hypertension (PAH) and chronic thromboembolic pulmonary hypertension (CTEPH) do not achieve treatment goals or experience side effects on their current therapy. In such cases, switching patients to a new drug while discontinuing the first may be a viable and appropriate treatment option. CAPTURE was designed to investigate how physicians manage the switching of patients to riociguat in real-world clinical practice. Observations from the study were used to assess whether recommendations in the riociguat prescribing information are reflected in clinical practice. Methods CAPTURE was an international, multicenter, uncontrolled, retrospective chart review that collected data from patients with PAH or inoperable or persistent/recurrent CTEPH who switched to riociguat from another pulmonary hypertension (PH)-targeted medical therapy. The primary objective of the study was to understand the procedure undertaken in real-world clinical practice for patients switching to riociguat. Results Of 127 patients screened, 125 were enrolled in CAPTURE. The majority of patients switched from a phosphodiesterase type 5 inhibitor (PDE5i) to riociguat and the most common reason for switching was lack of efficacy. Physicians were already using the recommended treatment-free period when switching patients to riociguat from sildenafil, but a slightly longer period than recommended for tadalafil. In line with the contraindication, the majority of patients did not receive riociguat and PDE5i therapy concomitantly. Physicians also followed the recommended dose-adjustment procedure for riociguat. Conclusion Switching to riociguat from another PH-targeted therapy may be feasible in real-world clinical practice in the context of the current recommendations. Electronic supplementary material The online version of this article (10.1007/s00408-018-0100-3) contains supplementary material, which is available to authorized users. For patients with PAH who do not achieve treatment goals, current European Society of Cardiology/European Respiratory Society (ESC/ERS) guidelines recommend double-or triple-sequential combination therapy. In studies investigating escalation of treatment, the initial drug is routinely continued. However, in cases where patients do not respond to the initial therapy, there are no data on whether the additional clinical effect is based on the drug combination. As such, in some cases it may be better to discontinue the original agent before starting a new therapy. This strategy of switching patients from one PH-targeted therapy to another is largely unexplored in clinical practice. Small studies and case reports have demonstrated positive outcomes after switching, but these have largely involved switching within a drug class, and were mainly due to lack of efficacy or safety and tolerability of the former drug. Riociguat is currently the only medical therapy approved for the treatment of both PAH and inoperable or persistent/recurrent CTEPH. Switching from a PDE5i to riociguat in PAH patients with an insufficient response to treatment has been explored in the RESPITE study. Results from this uncontrolled pilot study indicated that this may be a feasible and effective treatment strategy. 
Subgroup analysis from a CTEPH early access study and a study of 23 patients switching from PDE5i to riociguat suggest that switching from off-label PH-targeted therapy to riociguat is well tolerated in patients with CTEPH. Despite promising preliminary data, little is known about how switching to riociguat is managed in clinical practice. The CAPTURE study was designed to investigate how and why patients with PAH and inoperable or persistent/recurrent CTEPH are switched from other PH-targeted therapies to riociguat in real-world clinical practice. Data from the study were also used to assess whether recommendations from the riociguat prescribing information were in line with real-world practice. Study Design CAPTURE (clinicaltrial.gov: NCT02545465) was an international, multicenter, uncontrolled, retrospective chart review that collected data from patients with PAH or inoperable or persistent/recurrent CTEPH, who switched to riociguat from another PH-targeted therapy. Patients Male and female patients with PAH or inoperable or persistent/recurrent CTEPH who were switched to riociguat from another PH-targeted medical therapy and completed a 5-month documentation period were included. All patients were ≥ 18 years and provided written informed consent. Patients who did not switch therapy but received riociguat purely as an add-on to an ERA or prostacyclin analog were not eligible. Study Procedures Data were retrospectively collected from patient medical records for the 12-month period prior to switching and the 5-month period post-switching. For patients who discontinued riociguat, data were still collected for the 5-month post-switch period. Decisions about clinical management of each patient, including riociguat treatment duration, were determined solely by the treating physician, without influence from the study protocol. Primary Outcome Measures Primary outcome measures included information on riociguat dose adjustment during switching, vital signs during the dose-adjustment period (systolic and diastolic blood pressure and heart rate), switch medication, reason for switching, and duration of treatment-free periods between previous medication and riociguat. A treatment-free period was defined as the number of days between the last intake of the switched therapy and the first treatment with riociguat (excluding the last day of pre-switch drug intake and the first day of riociguat treatment). A treatment-free period of 0 indicates that riociguat was started 1 day after the last intake of the switched drug. A negative value for the treatment-free period indicates the switched drug was discontinued after the start of riociguat. Other Variables Patient characteristics (including baseline demographics, medical history, and clinical characteristics) and clinical parameters (6-min walking distance , Borg Dyspnea Index, World Health Organization function class , and biomarkers) were also collected. Safety Assessments Incidences of adverse events (AEs) and serious adverse events (SAEs), including those of special interest (serious hemoptysis and symptomatic hypotension), were assessed. Statistical Analyses All variables were analyzed descriptively and all patients who received at least one dose of riociguat were included in the analysis. No imputation of missing information was applied except for partial dates (full rules listed in Supplementary Information). Patient Population The total documentation period for the study was October 1, 2012 to May 31, 2016. 
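Before turning to the results, a small illustrative sketch (hypothetical dates, not study data) makes the day-counting convention for the treatment-free period concrete; how negative overlap values were recorded in the study database is only summarized above, so the negative branch here should be read as an assumption.

from datetime import date

def treatment_free_days(last_intake: date, first_riociguat: date) -> int:
    # Days strictly between the two dates, excluding both the last pre-switch
    # intake day and the first riociguat day, as defined above. A negative
    # result corresponds to overlapping treatment (assumed convention).
    return (first_riociguat - last_intake).days - 1

print(treatment_free_days(date(2014, 3, 10), date(2014, 3, 11)))  # 0: riociguat started the next day
print(treatment_free_days(date(2014, 3, 10), date(2014, 3, 14)))  # 3: three drug-free days in between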
Of the 127 patients screened, 125 were enrolled (Fig. 1). All enrolled patients completed the 5-month post-switch period and were included in the safety analysis set (SAF); 122 (98%) were included in the full analysis set (FAS) due to the exclusion of three patients for violations of the enrollment criteria. FAS data are presented, with the exception of demographic and safety-related data, where SAF data are presented. Among patients with PAH, most were receiving ERA + PDE5i (n = 24; 60%) or ERA + PDE5i + prostacyclin (n = 9; 23%) at the time of switching, and 34 patients (85%) switched PDE5i for riociguat. Among patients with CTEPH,

Fig. 1 Patient disposition. (a) Primary reason for premature discontinuation of riociguat therapy. (b) Adverse events reported: patient 1, dizziness, dyspepsia, and increased oxygen consumption; patient 2, exacerbation of pulmonary hypertension; patient 3, dyspnea and back pain. (c) Three patients were not included in the full analysis set due to violation of the inclusion or exclusion criteria.

In line with the contraindication for concomitant use of the two drugs, most patients did not receive riociguat + PDE5i. However, one patient received concomitant riociguat + PDE5i for 12 days. This patient experienced SAEs of worsening right heart failure (two episodes, both of which were resolved and not considered study-drug related); symptomatic hypotension was also reported, although not during the time in which concomitant riociguat + PDE5i was administered. The stop date for PDE5i therapy was incomplete for another patient. Reason for Switch The most common reason for switching to riociguat was lack of efficacy of the prior therapy (n = 102; 84%). Other reasons included patient request and lack of tolerability (Table 3). Duration of Treatment-Free Period The median (range) treatment-free period prior to commencing riociguat was 0 (−24 to 51) days. The median treatment-free period was longer in patients who switched from tadalafil, recorded as 2 (−1 to 5) days, than in those who switched from sildenafil, recorded as 0 (−24 to 13) days, where 0 days indicated riociguat was started the day following the last intake of the switched drug (Table 2). Due to the study imputation rules for partial or missing dates (see Supplementary Information), the patient with the incomplete stop date (day missing) led to a reported overlap of 24 days, affecting the calculation of the mean treatment-free period for switching from sildenafil to riociguat.

Abbreviations and table footnote: CTEPH, chronic thromboembolic pulmonary hypertension; ERA, endothelin receptor antagonist; PAH, pulmonary arterial hypertension; PDE5i, phosphodiesterase type 5 inhibitor. (a) The treatment-free period was the number of days between the day of last intake of the switched PH drug(s) and the day of first treatment with riociguat (excluding the last day with pre-switch PH drug intake and the first day with riociguat). If the switched drug was discontinued after the start of riociguat, the treatment-free period is negative. A treatment-free period of 0 indicates riociguat was started 1 day after the last intake of the switched drug.

Dose-Adjustment Procedure Riociguat was most frequently initiated at 3.0 mg/day (n = 79; 65%), followed by 1.5 mg/day (n = 20; 16%) and 7.5 mg/day (n = 16; 13%); two patients (2%) were started on 1 mg/day. The mean (SD) initial dose was 3.3 (1.8) mg/day. The mean (SD) number of dose-adjustment steps was 2.4 (1.5), with the majority of patients receiving three or four dose adjustments (n = 66; 54% and n = 12; 10%, respectively).
For 27 patients (22%), only the initial dose was documented. Vital signs were assessed at each doseadjustment step; mean systolic blood pressure remained above 100 mmHg during each increase in riociguat (Supplementary Table 1). The mean (SD) maintenance dose of riociguat was 6.7 (1.7) mg/day with 77% of patients achieving a maintenance dose of 7.5 mg/day within 8 weeks. Treatment-Free Period Two patients (2%), both with CTEPH, experienced an AE during the treatment-free period (liver disorder and hyperlipidemia). SAEs occurred in six patients (5%); in two patients (2%) the events were considered study-drug related: one patient experienced palpitations, a viral infection, and cardiac catheterization, and one patient experienced right ventricular failure. One patient (1%) discontinued riociguat during the dose-adjustment period and 11 patients (9%) experienced AEs that resulted in a dose reduction or interruption. Efficacy 6MWD, N-terminal pro-brain natriuretic peptide, and WHO FC at baseline and last follow-up visit are shown in Supplementary Figs. 2, 3, and 4, respectively. Discussion CAPTURE was a retrospective chart review designed to understand how patients with PAH or CTEPH switched to riociguat from other PH-targeted therapies in real-world clinical practice. The main reason for switching to riociguat was a lack of efficacy of previous PH-targeted therapies. This is in contrast to previously published studies where patients mainly switched due to comfort, safety, and tolerability [22,. However, these studies focused on switching from intravenous or subcutaneous prostacyclins to a second prostacyclin (mainly treprostinil) or an ERA, whereas in CAPTURE, 80% of patients switched from an oral PDE5i. Therefore, as well as the difference in mechanism of action between PH therapies leading to varying side effect profiles, administration procedure may also play a role in the reason for switch. It has also been reported that patients switch from sildenafil due to AEs, and in CAPTURE, 4% of the population switched due to lack of tolerability of prior therapy. In line with ESC/ERS guidelines, most patients with PAH were receiving combination therapy before switching; the majority of these switched from a PDE5i to riociguat. However, patients with inoperable or persistent/recurrent CTEPH were receiving off-label PH-targeted therapies, as riociguat is the only medical therapy approved in CTEPH. Following switching, many patients with PAH received double or triple combination therapy in conjunction with riociguat, while most patients with CTEPH switched to riociguat monotherapy. Of note, due to concomitant riociguat and PDE5i being contraindicated, most patients in CAPTURE did not receive riociguat + PDE5i in combination. One patient had overlapping treatment with a PDE5i and riociguat; however, although the patient experienced right heart failure and hypotension after PDE5i had stopped, these events resolved and riociguat treatment was completed. The US prescribing information recommends 24-and 48-h treatment-free periods for patients switching from sildenafil and tadalafil, respectively, based on the drop in systemic blood pressure caused by concomitant administration of PDE5i and nitrates, which have a similar mechanism of action to riociguat. The treatment-free periods observed for sildenafil and tadalafil in CAPTURE suggest that clinicians may be in line with the recommendations for sildenafil, but use a longer than recommended treatment-free period for tadalafil. 
Riociguat is administered using an 8-week individual dose-adjustment scheme, starting at 1.0 mg three times a day (tid) and is increased every 2 weeks in the absence of hypotension, to a 2.5 mg tid-maximum. CAP-TURE showed that physicians tended to adhere to this protocol, with most patients initiating treatment of riociguat at 3 mg/day. However, 24 patients were started at lower than recommended doses of riociguat. This may be due to over-cautiousness on the part of the physician regarding the contraindication and risk of hypotension with PDE5i + riociguat, as 18 of these patients switched from PDE5i therapy. Additionally, 16 patients initiated riociguat at the maximal dose of 7.5 mg/day, which may be due to physician concern for clinical worsening with lower doses. The percentage of patients receiving a 7.5 mg/day (2.5 mg tid) maintenance dose was similar in CAPTURE to the percentages in the PATENT-1 and CHEST-1 Phase III clinical trials. In CAPTURE, 77% of patients reached the maximum dose by Week 8, while in PATENT-1 and CHEST-1, the maximum dose was reached by 75% (at Week 12) and 77% (at Week 16), respectively. The data from CAPTURE suggest that switching to riociguat from other PH-targeted therapies in clinical practice may be carried out safely and is well tolerated. AEs were rarely reported during the treatment-free period, and during the dose-adjustment period, only one patient discontinued riociguat, and 11 patients experienced AEs that resulted in dose reduction or interruption. Although it is important to interpret these observations with caution based on the inherent selection bias of a retrospective chart review (discussed below), they are in line with the results of published case studies and retrospective analyses of both PAH and CTEPH patients switching from other PH-targeted therapies (mainly PDE5i) to riociguat in real-world clinical practice [18,20,. The results from CAPTURE also support preliminary data from the RESPITE clinical trial, which indicated that switching from sildenafil or tadalafil to riociguat in patients with PAH not reaching treatment goals was safe and well tolerated. A key limitation of CAPTURE is that, as a retrospective chart review, it had an inherent selection bias. Patients who died after switching but before giving informed consent for the study were not included. Although this bias applies to all chart reviews, it is important to exercise caution when interpreting the data from CAPTURE, as exclusion of patients who died means that the most unwell patients were not included in the safety analyses. Another study limitation, also owing to its retrospective nature, is the limited efficacy data, with only a small proportion of patients having postbaseline measurements for 6MWD, Borg dyspnea index, WHO FC, and biomarkers. These low patient numbers may be reflective of efficacy parameters primarily being assessed in patients who are not responding well to treatment, resulting in biased data. Moreover, clinical parameters were analyzed by original visit number and an artificial visit window scheme, meaning that the data available at each visit were highly variable. These low and variable patient numbers mean that it is almost impossible to interpret the data. In conclusion, most patients in CAPTURE were initiated and uptitrated on riociguat in line with recommendations in the label, with a similar percentage of patients achieving the maximum maintenance dose in real-world clinical practice as in the PATENT and CHEST clinical trials. 
No new safety signals were observed. These data suggest that switching may be feasible in the context of current recommendations. Conflict of Interest personal fees from Actelion, AstraZeneca, Bayer AG, GSK, Janssen Cilag, Lilly, Pfizer, and United Therapeutics. J-LV reports grants from Actelion, Bayer AG, GSK, Lilly, and Merck, and holding the Actelion Chair for research in PH within his institution. NT reports lecture honoraria from Actelion, Bayer Yakuhin, Daiichi-Sankyo, and being a member of an endowed department with Actelion. MH reports board membership for Actelion, Bayer AG, GSK, and Novartis; lecture fees from Actelion, AstraZeneca, Bayer AG, GSK, Lilly, MSD, Novartis, and Pfizer; and personal fees from Actelion, Bayer AG, GSK, Lilly, and Novartis. MO-L has nothing to disclose. LM reports personal fees from Actelion and Bayer AG, and consulting, speaker fees, and research funding from Actelion and Bayer AG. MC reports employment by Bayer AG. KV reports employment by Bayer AG. EG reports grants from Actelion, Bayer AG, GSK, Lilly, and Pfizer; personal fees from Bayer AG, Milteny, Novartis, and United Therapeutics; and nonfinancial support from Alexion. Ethical Approval The study was carried out within an approved indication in accordance with guidelines and regulations of the EMA, the FDA, and applicable local laws and regulations. ICH-GCP guidelines were followed wherever possible. In all countries where reference to an IEC/IRB is required, documented approval from the appropriate IEC/IRB will be obtained for all participating centers prior to study start. Informed Consent Informed consent was obtained from all individual participants included in this study. Open Access This article is distributed under the terms of the Creative Commons Attribution 4.0 International License (http://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided you give appropriate credit to the original author(s) and the source, provide a link to the Creative Commons license, and indicate if changes were made.
OBJECTIVE We performed a retrospective review of a series of 82 cases of Williams-Beuren syndrome (WBS) and associated diseases. MATERIAL AND METHODS A series of 82 patients (47 males and 35 females) who consulted at the hospital because of mental retardation and/or congenital cardiopathy were included. The patients were studied mainly from a neurological and cardiological point of view, and secondarily with regard to endocrinological and nephrological problems. Since the description of the chromosomal abnormalities causing the syndrome, we have performed karyotyping in all patients with suspected WBS. RESULTS Alterations mainly consisted of distinctive facial appearance (100 %), mental retardation with friendly behavior (90 %), congenital cardiopathy (85.4 %), mostly consisting of supravalvular aortic stenosis (72 %), with (12 %) or without (60 %) pulmonary stenosis, and behavior typical of attention deficit-hyperactivity disorder, which usually manifested at the age of 4 to 5 years in both boys and girls. Approximately 90 % started to walk and speak later than average. Birthweight was below 3000 g in 65 % of the patients in whom this datum was included in the medical record. Eleven of the 13 patients (84.5 %) studied showed the typical deletion of WBS. CONCLUSION Study of patients with WBS should be multidisciplinary. Most patients require help during schooling and subsequent vocational guidance.
// Copyright (c) 2019 VMware, Inc. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 // Package archiver provides functionality to create tar files. package archiver
#!/usr/bin/env python3 from snr.constants import info from snr.constants import keybinds from snr.constants import messages
Mahla Pearlman

Early life and education
Pearlman was born in Boggabri, New South Wales, the daughter of Minnie and Mark Pearlman. She attended Boggabri Public School with her younger brother Braham. She later attended MLC School in Burwood. She studied at the University of Sydney, where she graduated with a Bachelor of Arts in 1957 and a Bachelor of Laws with Honours in 1960.

Career
Pearlman was admitted as a solicitor on 11 March 1960 and in 1981 became the first woman President of the Law Society of New South Wales. In 1989 she was the first woman President of the Law Council of Australia, a body that represents both solicitors and barristers.

Land and Environment Court
In 1992 Pearlman was the first woman appointed the Chief Judge of the New South Wales Land and Environment Court. The appointment of a solicitor to such a high judicial position was controversial at a time when most judges had previously been barristers. Critics of that system said limiting the pool of potential judges to barristers was too narrow and that widening it to include solicitors and academics would increase the number of women judges. In 1992 Jane Mathews was the only woman out of 45 Supreme Court judges, Leonie Glynn was the only woman out of 9 Industrial Court judges, and Angela Karpin was the only woman out of 56 judges of the District Court. Despite the initial controversy, Pearlman led the Court for 11 years, with Justice Terry Sheahan stating that she led the court supremely well.

Honours
In January 1985 Pearlman was made a Member of the Order of Australia for service to the legal profession, particularly the New South Wales Law Society. She was awarded the Centenary Medal for service to law, and in June 2004 Pearlman was made an Officer of the Order of Australia for service to the law and the judiciary, to the development of professional practice standards, and to the community. Pearlman's career, contribution, and memory are honoured by the Mahla Pearlman Oration, an annual event delivered by an eminent person concerned with environment and planning law. The Mahla Pearlman Award is an annual award to a young lawyer who has made a significant contribution to environmental law.
Len McCluskey has won the poll to be re-elected as general secretary of Unite, beating his nearest rival by over 5,500 votes. Mr McCluskey polled 59,067 votes, with Gerard Coyne securing 53,544 and Ian Allinson 17,143. The turnout was just over 12%, the Unite union said. Unite's acting general secretary Gail Cartmail said: "I congratulate Len McCluskey on his victory and would urge the entire union to pull together in the interests of our members, and not least to work for a Labour victory in the general election. "The sooner we can move to secure and secret workplace and online voting, the better for union democracy."
import pygame import time import os, sys, math, pygame, pygame.font, pygame.image from pygame.locals import Rect import utils import random import processor.greedy5 as processor import abc '''Image credit Intro screen - https://pbs.twimg.com/media/DlE5j74XsAANQDX.jpg Main Screen - http://goldwakepress.org/data/Gaming-Tutorial-Blackjack-Casino-Game-Tutorial.jpg End Screen - https://gifer.com/en/9qR4 Card image - https://www.daniweb.com/attachments/0/Cards_gif.zip ''' #global var state = 1 #warna gray = (128,128,128) white = (255,255,255) silver= (192,192,192) navy =(0,0,128) black =(0,0,0) cyan=(0,255,255) aqua =(127,255,212) #text welcome class textWavey: def __init__(self, font, message, fontcolor, amount=10): self.base = font.render(message, 0, fontcolor) self.steps = range(0, self.base.get_width(), 2) self.amount = amount self.size = self.base.get_rect().inflate(0, amount).size self.offset = 0.0 def animate(self): s= pygame.Surface(self.size) s.set_colorkey((0,0,0)) height = self.size[1] self.offset += 0.1 for step in self.steps: src = Rect(step, 0, 2, height) dst = src.move(0, math.cos(self.offset + step*.02)*self.amount) s.blit(self.base, dst, src) return s class screen(abc.ABC): @abc.abstractmethod def loop(self): pass @abc.abstractmethod def eventLoop(self): pass class introScreen(screen): def __init__(self): bigfont = pygame.font.SysFont(None, 60) self.renderer = textWavey(bigfont, 'Welcome..', white, 14) def switchToMain(self): global state state = 2 def loop(self): x = (width*0.001) y = (height*0.001) jokerImg = pygame.image.load('assets/joker1.png') screen.blit(jokerImg, (x,y)) text = self.renderer.animate() screen.blit(text, (300, 400)) button('Start',350,480,100,50,gray,silver,self.switchToMain) def eventLoop(self): pass class mainScreen(screen): def __init__(self,deck): self.prog = -99 self.cardHover = False self.repickShape = True self.lastDeck = deck self.lastDeckSym = [] self.ekspresi = "" self.poin = 0 self.cardX = 350 self.cardY = 200 self.eks = pygame.image.load('assets/eks.png') self.score =pygame.image.load('assets/score.png') self.poinI = pygame.image.load('assets/poin.png') self.cardIBack1 = pygame.image.load('assets/back1.png').convert() self.cardIBack2 = pygame.image.load('assets/back2.png').convert() self.jackImg = pygame.image.load('assets/kartu.png') self.endI = pygame.image.load("assets/end.gif") def switchToEnd(self): global state state = 3 def updateParam(self,deck,poin, ekspresi): self.poin = poin self.ekspresi = ekspresi self.lastDeck = deck self.repickShape = True self.prog = 0 #untuk ambil kartu def pick_Card(self,deck,total): if len(deck)>0: out = utils.pick4card(deck) poin,ekspresi= processor.calculate(out[0]) total+=poin print(out[0]) return [False,out[1],total,ekspresi] else: return [True,[],total,None] def animateSys(self,prog): targetPosX = [-300,-100,100,300] targetPosY = 200 originPosX = 350 originPosY = 200 if (len(self.lastDeck)>0): if(self.repickShape): self.lastDeckSym = ['S','H','D','C'] for i in range(1,4): shape = ['S','H','D','C'] s = random.choice(shape) self.lastDeckSym[i] = s self.repickShape = False for i in range(1,5): x = originPosX + (targetPosX[i-1]) * (prog/100.0) y = originPosY + (targetPosY) * (prog/100.0) card(x,y,self.lastDeck[i-1],self.lastDeckSym[i-1]) else: x = (width*0.001) y = (height*0.001) screen.blit(self.jackImg, (x,y)) button('Reshuffle',350,300,100,50,cyan,aqua,game_loop) button('Exit ?',350,500,100,50,cyan,aqua,self.switchToEnd) def loop(self): x = (width*0.001) y = (height*0.001) cardX = 350 # x coordinate of 
card cardY = 200 # y coordinate of card screen.blit(self.jackImg, (x,y)) if self.cardHover: cardI = self.cardIBack2 else: cardI = self.cardIBack1 screen.blit(cardI,(cardX,cardY)) screen.blit(self.eks,(150,550)) screen.blit(self.score,(100,50)) screen.blit(self.poinI,(450,50)) poin_s=utils.countScore(self.ekspresi) message_display(self.ekspresi,400,575) message_display(str(round(poin_s,2)),500,70) message_display(str(round(self.poin,2)),150,65) if(self.prog!=-99): if(self.prog<100): self.prog+=1 self.animateSys(self.prog) def eventLoop(self): mouse = pygame.mouse.get_pos() click = pygame.mouse.get_pressed() if self.cardX+50 > mouse[0] > self.cardX and self.cardY+80 > mouse[1] > self.cardY: self.cardHover = True if click[0]==1 : _,deck,poin,ekspresi=self.pick_Card(self.lastDeck,self.poin) self.updateParam(deck,poin,ekspresi) else: self.cardHover = False class endingScreen(screen): def __init__(self): self.endI = pygame.image.load("assets/end.gif") def switchToM(self): global state state = 2 def loop(self): x = (width*0.001) y = (height*0.001) screen.blit(self.endI, (x,y)) button('Bye',150,330,100,50,white,cyan,quit) button('Play again',450,330,100,50,white,cyan,self.switchToM) def eventLoop(self): pass def text_objects(text, font): textSurface = font.render(text, True, black) return textSurface, textSurface.get_rect() #untuk kartu def card (x,y,nilai,s): nilai=str(nilai) cardImg =pygame.image.load('assets/'+s+nilai+'.gif') screen.blit(cardImg, (x,y)) #untuk kalimat def message_display(text,x,y): a = pygame.font.SysFont('Times New Roman',35) TextSurf, TextRect = text_objects(text,a) TextRect.center =(x,y) screen.blit(TextSurf, TextRect) #start button atau button lain def button(msg,x,y,w,h,ic,ac,action=None): mouse = pygame.mouse.get_pos() click = pygame.mouse.get_pressed() if x+w > mouse[0] > x and y+h > mouse[1] > y: pygame.draw.rect(screen, ac,(x,y,w,h)) if click[0] == 1 and action != None: action() else: pygame.draw.rect(screen, ic,(x,y,w,h)) smallText = pygame.font.SysFont("comicsansms",20) textSurf, textRect = text_objects(msg, smallText) textRect.center = ( (x+(w/2)), (y+(h/2)) ) screen.blit(textSurf, textRect) def game_loop(): global state deck = utils.getNewDeck() intro = introScreen() main = mainScreen(deck) end = endingScreen() screenObj = [intro, main, end] while 1: screenObj[state-1].loop() for event in pygame.event.get(): if event.type == pygame.QUIT: quit() screenObj[state-1].eventLoop() pygame.display.flip() if __name__ == '__main__': #insialisasi pywindow pygame.font.init() (width, height) = (800, 600) screen = pygame.display.set_mode((width, height)) pygame.display.flip() pygame.display.set_caption("24 game") game_loop()
package com.ximsfei.rush; import android.app.Application; import com.ximsfei.rush.db.DBHelper; import com.ximsfei.rush.util.SPUtils; /** * Created by ximsfei on 17-2-25. */ public class RushApplication extends Application { @Override public void onCreate() { super.onCreate(); SPUtils.init(this); DBHelper.init(this); } }
package seedu.homerce.model.util.uniquelist; /** * API of an item in a UniqueList. */ public interface UniqueListItem { boolean isSame(UniqueListItem other); }
package compiling.tokens.types;

public class AccessToken implements IToken {

    private final String name;

    public AccessToken(String name) {
        this.name = name;
    }

    @Override
    public String getText() {
        return name;
    }

    @Override
    public String toString() {
        return getText();
    }
}
How do job insecurity profiles correspond to employee experiences of work-home interference, self-rated health, and psychological well-being? Abstract Objectives Traditional variable-oriented research has shown that employee perceptions of job insecurity (JI) are associated with negative consequences, including more work-home interference, poorer health, and impaired well-being. Besides the negative consequences of high JI, particular combinations of JI perceptions may also be associated with different consequences. Taking a person-oriented approach, this study aimed to investigate whether it is possible to distinguish different combinations of JI perceptions among working women and men and whether such JI profiles involve different experiences of work-home interference, health, and well-being. Methods Self-reports in questionnaires of JI, including both quantitative and qualitative threats of perceived job loss, work-home interference (WHI), health, and psychological well-being came from 1169 white-collar workers (52.4% women) in Sweden. Latent profile analysis was performed to identify JI profiles. Subsequent analyses included comparing profiles with respect to WHI, health, and well-being. Results Four distinct JI profiles were identified: "Secure, quality-concerned"; "Insecure, employment-concerned"; "Insecure"; and "Secure". Comparisons of cluster profiles showed significant differences in work-home interference (family-work conflict), self-rated health, and psychological well-being. Conclusions Findings suggest that the Insecure profile may be most vulnerable to adverse consequences of perceived JI. Taken together, different JI profiles may be associated with differential experiences of work-home interference, health, and psychological well-being among working women and men. | Job insecurity and health Based on general conceptualizations of stress, anticipating a stressful event can be as stressful, or perhaps even more stressful, than the experience of the actual event: threat perceptions elicit a stress response (an increase in arousal) to enable survival. 17 When the alarmed arousal is sustained over time (ie, chronic JI), it is associated with health risks. 17 Perceived lack of control is central to harmful JI experiences. 9,18 In contrast, a predictable threat of job loss allows individuals to start coping with the actual situation, with any positive expectancies regarding success (eg, a new job) being associated with reduced alarm and arousal. 17 A plethora of cross-sectional and longitudinal studies show that perceived JI has various health-related consequences for employees. 5,19,20 Among such consequences are common mental and physical health complaints and also more severe mental health problems. A recent meta-analysis of perceived JI and mental health showed strong support for JI perceptions being associated with poorer mental health, particularly depressive symptoms. 19 | Job insecurity and psychological well-being Traditionally, research has focused on negative aspects of mental health (eg, depressive symptoms), meaning that fewer studies have included positive aspects, such as psychological well-being. Considering that JI can have detrimental consequences for mental health, individuals worrying about their future job situation may in fact hinder their own potential to make use of any available financial, social, and support resources to successfully cultivate positive psychological functioning in terms of mastery, personal development, self-esteem, and social relationships.
These aspects of positive psychological functioning are included in a conceptualization of psychological well-being covering six dimensions: autonomy, environmental mastery, personal growth, positive relations with others, purpose in life, and self-acceptance. Circumstances where individuals have poorer opportunities to pursue these dimensions can contribute to impaired psychological well-being. 22 For instance, JI may involve the perceived threat of losing all aspects of the employment situation, not only the job. According to Jahoda's latent deprivation model, 23 employment serves both manifest and latent functions. In short, the manifest function relates to employment providing an income, while the latent function involves social and developmental opportunities, personal identity and status, a sense of collective purpose, and the structure provided by work. Thus, any threat of job loss puts important aspects of life at risk. 23 | Quantitative and qualitative JI While most JI research has focused on quantitative JI, that is, concerns regarding the future existence of the current job, 8 other studies also include qualitative JI, which involves | 3 of 12 concerns regarding the quality of the current employment situation. For instance, this includes valued work tasks, career opportunities, and salary development. 8 Similar to the negative associations between quantitative JI and health outcomes, qualitative JI has been associated with health-related impairment 24,25 and work-family conflict. 10 However, taking both aspects of JI into account, recent research has shown qualitative and quantitative JI to have differentiated effects on health and well-being outcomes. 26 Conceptualizing JI as a two-dimensional construct means that employees may differ in their experiences of JI and, for instance, worry about both quantitative and qualitative aspects or about one aspect only. | The person-oriented approach Current JI research has primarily taken a variable-oriented approach. This approach focuses on relationships between variables, often emphasizing the identification of predictors, mediators, moderators, and consequences in specific populations. Also, this approach often uses aggregate data and assumes homogeneity between individuals. However, it is well-known that individuals' experiences vary. A personoriented approach uses this variability between individuals, which allows for the investigation of groups, or profiles of individuals, showing similar variation in some characteristics of interest. As such, the approach is often defined as holistic, describing individuals as wholes. The approach includes different statistical methods analyzing within-person variation to group individuals into profiles according to their shared experiences. Such analyses allow for establishing different profiles and estimating their prevalence. Going beyond the somewhat static characteristics of high and low groups of variable-oriented research, a person-oriented approach allows for developing dynamic combinations of JI perceptions, their variation, and correlations. 27 | Aims and research questions This study aimed to add to the existing JI research through using a person-oriented approach to investigate JI profiles among working adults. This approach allows for a finegrained analysis of potential combinations of JI perceptions. Specifically, the first study aim was to distinguish JI profiles. Thus, research question 1 investigated whether it was possible to identify different JI profiles. 
The limited use of the person-oriented approach in previous studies makes it difficult to define priori which and how many profiles to distinguish. Thus, we adopted an exploratory approach, without specifying the number or the prevalence of profiles. Job insecurity indicators included both quantitative and qualitative aspects, and to explore their variability, they were investigated separately to allow for their unique characteristics. The second aim was to investigate whether experiences of work-home interference, health, and psychological wellbeing differed between JI profiles. Following previous variable-oriented research, individuals with more detrimental JI profiles were expected to report more work-home interference, poorer health, and psychological well-being than others. Thus, research question 2 investigated profile variation in health-related indicators. | Participants and data collection A stratified sample of women and men working full-time in one of four key occupational fields (administrative work, education, health care, or technology/natural sciences 28 ), employing both women and men at managerial and nonmanagerial levels, were invited to a questionnaire study. To be eligible, participants had to be between 32 and 58 years of age and work minimum 35 hours a week. The lower age-limit was set considering mean-ages in Sweden for receiving a university degree, and having a first child, while also considering that time is needed to get a job and return to full-time after parental leave. The upper age-limit was set considering the mean retirement age to ascertain that older respondents still had working years left before retiring. As for weekly working hours, Statistics Sweden defines full-time as minimum 35 hours per week. 29 Participants were identified through the Total Population Register with Statistics Sweden. Paper questionnaires, including information regarding the research and research ethics, were mailed through Statistics Sweden to home addresses of 2493 individuals fulfilling the criteria. About a week later, all received a first mailed "thank-you-and-reminder" card. After another four weeks, non-respondents received a second reminder (with questionnaire). Most (78%) returned their questionnaires after the first reminder. Questionnaires were returned in pre-addressed and postage-paid envelopes to Statistics Sweden. Statistics Sweden fully anonymized the data before distribution to the research group. The research project was approved by the Regional Ethics Committee (Ref. No. 2008/1593, and the study protocol passed internal ethical evaluation with Statistics Sweden. A total of 1396 individuals (response rate: 56%) volunteered participation. To be included in the analytic sample, participants had to fulfill the following criteria: currently work a minimum 35 hours per week, not report being on sick-leave for 100 or more days over the past 12 months, and have complete JI data. This resulted in 227 individuals not meeting these criteria (working less than 35 hours; n = 204; sickness absence 100 or more days: n = 24; incomplete JI data: n = 18; notably some did not meet several criteria). The analytic sample reported significantly better self-rated health than those excluded (M = 3.9 vs. 3.7, t = 3.06, P =.002, mean difference: 0.22, CI: 0.08-0.37). However, there were no significant age differences. The analytic sample included 1169 respondents (52.4% women). 
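As a minimal illustration of the sample-selection step described above (not the authors' code; the file and all column names are hypothetical), the three inclusion criteria translate into a straightforward filter:

# Minimal sketch of the sample selection, assuming the anonymized questionnaire
# data sit in a CSV file with hypothetical column names.
import pandas as pd

df = pd.read_csv("questionnaire_data.csv")  # hypothetical file name
ji_items = [f"quant_ji_{i}" for i in range(1, 4)] + [f"qual_ji_{i}" for i in range(1, 4)]

analytic = df[
    (df["weekly_hours"] >= 35)             # currently working full-time
    & (df["sick_leave_days"] < 100)        # not on long-term sick leave
    & df[ji_items].notna().all(axis=1)     # complete JI data
]
print(f"excluded: {len(df) - len(analytic)}, retained: {len(analytic)}")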
| Measures Job insecurity was measured using two subscales: quantitative (example item: I feel uneasy about losing my job in the near future) and qualitative JI (example item: I worry about getting less stimulating work tasks in the future) developed by Hellgren et al. The two JI dimensions were measured with three items each. 8,30 Response alternatives ranged from 1 (strongly disagree) to 5 (strongly agree). Reliabilities (Cronbach's ) were good (quantitative: 0.95; qualitative: 0.76). Work-home interference was measured using two twoitem scales developed by 31 of work-to-family conflict (WFC; example item: How often does your job or career interfere with your responsibilities at home, such as gardening, cooking, cleaning, repairs, shopping, paying bills, or childcare?) and family-to-work conflict (FWC; example item: How often does your private life keep you from spending the time you like to spend on work or career-related activities?). Response alternatives ranged from 1 (never) to 5 (constantly). For technical reasons, only individuals with children (64.5% of analytic sample) responded to WHI-items. Reliabilities (Cronbach's ) were good (WFC: 0.84; FWC: 0.78). Self-rated health was measured using a single-item measure by Idler and Benyamini asking respondents, "How would you rate your health?" Response alternatives ranged from 1 (excellent) to 5 (very poor). Ratings were reversed with high scores corresponding to better health. Given its holistic quality, this measure is considered a good indicator of health. 32 Psychological well-being was measured using the 18item short-form version of the Ryff scales, which covers six psychological well-being dimensions with three items each (example item: For me, life has been a continuous process of learning, changing, and growth). 22,33,34 Response alternatives ranged from 1 (disagree completely) to 6 (agree completely). Overall psychological well-being was measured using a composite score. Internal consistency (Cronbach's ) was 0.81. Demographics included age, gender (woman, man), relationship status (living with partner or not), children younger than 18 living at home (yes/no), and number of individuals in the household. | Data preparation and analytic strategy Latent profile analysis (LPA), a mixture modeling type used to identify subgroups with similar patterns within a larger population, was performed in Mplus 8.4 35 to identify latent JI profiles. Both JI indicators were included in the LPA to estimate the models. Using an uncorrelated specification and maximum likelihood (ML), six different models were estimated including one to six profiles. The following criteria were used to evaluate model fit 36 : sample-size adjusted Bayesian information criterion (SABIC 37 ), bootstrapped likelihood ratio test (BLRT 38 ), profile membership distribution, and posterior probabilities of each profile. The optimal solution is characterized by the lowest SABIC, the lowest significant BLRT, no groups with less than 5% of the sample, and no posterior probabilities below 0.70. 39 Tests for entropy were performed, 40 where a proportion of 0.80 or higher suggests a good classification. 36 To ensure stability, the best fitting solution was replicated three times. 41 To examine profile distinctiveness, multivariate analysis of variance (MANOVA) was performed to examine differences in mean levels in profile indicators, including profile membership as the independent variable. 
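The LPA itself was run in Mplus; as a rough open-source sketch of the same idea (my own illustration, assuming the two JI scale means are available as an (n, 2) array), a diagonal-covariance Gaussian mixture approximates the uncorrelated specification, although it does not reproduce Mplus' SABIC, BLRT, or LMR-LRT tests:

# Hedged illustration only: scikit-learn's Gaussian mixture with diagonal
# covariances stands in for the uncorrelated LPA specification.
import numpy as np
from sklearn.mixture import GaussianMixture

X = np.loadtxt("ji_scores.csv", delimiter=",")   # hypothetical (n, 2) array: quantitative, qualitative JI

for k in range(1, 7):                            # 1- to 6-profile solutions
    gm = GaussianMixture(n_components=k, covariance_type="diag",
                         n_init=10, random_state=0).fit(X)
    post = gm.predict_proba(X)                   # posterior profile probabilities
    share = np.bincount(post.argmax(axis=1), minlength=k) / len(X)
    if k > 1:                                    # relative entropy (1 = perfect classification)
        entropy = 1 - (-(post * np.log(post + 1e-12)).sum()) / (len(X) * np.log(k))
    else:
        entropy = 1.0
    print(f"k={k}  BIC={gm.bic(X):.1f}  smallest profile={share.min():.2%}  entropy={entropy:.2f}")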
Additional analyses (chi-square tests for categorical variables, MANOVAs for WHI measures, and ANOVAs for remaining variables) examined differences in demographics and health-related indicators. With profile groups of different sizes and Levene's tests showing unequal group variances, Dunnett's T3 test (Plevel <.05), an adequate test for unequal variances, 42 was used for post-hoc comparisons. Otherwise, the Scheff test was used. These analyses were performed in SPSS 26. Table 1 presents the results from the LPA comparing different models. The 3-profile model had the lowest significant LMR-LRT in combination with the second lowest BLRT. However, the SABIC was lower for the 4-, 5-, and 6-profile model, coupled with high entropy coefficients and high posterior probabilities. Of the 4-, 5-, and 6-profile models, the 6-profile model had the lowest and significant LMR-LRT. But the 6-profile solution had two profiles with less than 5% of the total count with the remaining profiles also being rather small. This is similar to the 5-profile model which also had two profiles with less than 5% of the total count, another profile with a smaller percentage of the total count, and two profiles with a larger percentage of the total count. To align with the criteria of having profiles with at least 5% of the sample, the 5-and 6-profile models were rejected. The 4-profile model had two smaller profiles but, | Latent profiles in comparison with the 5-and 6-profile models, both these smaller profiles included more than 5% of the sample (5.74 and 6.16, Table 1). Also, posterior probabilities and entropy of the 4-profile model were high, and the LMR-LRT was lower than that of the 5-profile model. Comparing the 3-and 4-profile models showed that the 4-profile model had the lower SABIC, a significant BLRT, the higher entropy, and a strongly significant LMR-LRT. This, along with its successful replication, supported the 4-profile model as the best fitting model. A MANOVA examining mean-level differences showed an overall significant effect (Pillai's trace = 0.98, F = 370.86, P <.0001, eta2 = 0.49) with subsequent ANOVAs, reported in Table 2, showing significant effects for both quantitative (F = 9105.80, P <.0001, eta2 = 0.96) and qualitative (F = 136.48, P <.0001, eta2 = 0.26) JI. Dunnett's t tests showed that Profile 1, the second largest group (n = 238), was significantly different from all other profiles in both quantitative and qualitative JI, exhibiting the second lowest mean-levels on both JI indicators. Profile 2, the second smallest group (n = 149), had the second highest mean-levels on both indicator measures. Consistently significant differences from all other profiles emerged for quantitative JI only. Profile 3, the smallest group (n = 67), exhibited the highest mean-levels for both indicators. These mean-levels were significantly higher than in all other profiles for quantitative JI, whereas Dunnett's t tests showed no statistically significant difference in qualitative JI between Profiles 2 and 3. Profile 4, the largest group (n = 715), exhibited the lowest meanlevels for both JI indicators and post hoc comparisons showed that mean-levels of quantitative and qualitative JI were significantly lower than in all other profiles ( Table 2). Table 3 shows means, standard deviations, and Pearson correlations for all study variables. Table 4 presents demographics and health-related characteristics for the profiles. 
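For the subsequent profile comparisons, a comparable sketch with SciPy is given below (synthetic stand-in data only; Dunnett's T3 and Scheffé post-hoc tests have no direct SciPy equivalent and are omitted):

# Illustrative only: Levene's test for equality of variances, then a one-way
# ANOVA of an outcome (e.g., self-rated health) across the four profiles.
import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
labels = rng.integers(0, 4, size=500)        # stand-in profile memberships
outcome = rng.normal(3.9, 0.8, size=500)     # stand-in self-rated health ratings

groups = [outcome[labels == k] for k in range(4)]
lev_stat, lev_p = stats.levene(*groups)      # unequal variances would motivate Dunnett's T3
f_stat, p_val = stats.f_oneway(*groups)
print(f"Levene p={lev_p:.3f}  ANOVA F={f_stat:.2f}, p={p_val:.4f}")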
For gender distribution and family situation, no statistically significant differences emerged. For age, a significant effect emerged (F = 3.29, P <.05, eta2 = 0.01), with Scheffé tests suggesting a difference (P <.05) between Profiles 1 and 4, with those in Profile 4 being significantly older than those in Profile 1. For WHI, there was a significant overall effect (Pillai's trace = 0.20, F = 2.53, P <.02, eta2 = 0.01). Subsequent analysis showed that the effect for WFC was more of a trend (F = 2.77, P =.041, eta2 = 0.01), while the effect for FWC was significant (F = 4.28, P <.01, eta2 = 0.02). Scheffé tests showed one statistically significant profile difference: FWC was higher in Profile 2 than in Profile 4 (P <.05). As for SRH, there was a statistically significant effect (F = 12.86, P <.0001, eta2 = 0.03), with Dunnett's t tests showing differences between Profiles 1 and 3 (P <.05), with Profile 1 having better SRH; Profiles 2 and 4 (P <.001); and Profiles 3 and 4 (P <.0001), with Profile 4 having better SRH than Profiles 2 and 3. For PWB, there was a statistically significant effect (F = 22.08, P <.0001, eta2 = 0.06), with Scheffé tests showing statistically significant effects between Profiles 1 and 4 (P =.0001), 2 and 4 (P =.0001), and 3 and 4 (P =.0001), with Profile 4 having higher well-being than the others. | DISCUSSION Employing a person-oriented approach, the current study investigated working individuals' JI perceptions and their associations with work-home interference, health, and psychological well-being. The LPA yielded four distinctly different profiles of individuals based on their subjective perceptions of quantitative and qualitative JI, entitled Secure; quality-concerned, Insecure: employment-concerned, Insecure, and Secure. These profiles were then compared with respect to reports of WFC, FWC, self-rated health, and psychological well-being. Similar to findings from variable-oriented research, results from the current study indicate that the Insecure profile (Profile 3) tends to fare worse than the Secure profile (Profile 4). Overall, the findings of this study suggest that different JI profiles may correspond to differential experiences of work-home interference, health, and psychological well-being. | Job insecurity profiles This study is among the first to investigate profiles of quantitative and qualitative JI using a person-oriented approach. Besides allowing the identification of specific groups or profiles with similar combinations of JI perceptions, this approach allows for estimating the prevalence of particular profiles. 36 The Secure profile (Profile 4) was undoubtedly the most prevalent profile, reflecting that the majority had low levels of both quantitative and qualitative JI. The second largest group, the Secure; quality-concerned (Profile 1), had lower quantitative JI in combination with intermediate levels of qualitative JI. The third group, about 10 percent of the sample, included the Insecure: employment-concerned (Profile 2), with intermediate levels of both quantitative and qualitative JI but somewhat higher quantitative JI. The smallest group, Profile 3, included the Insecure, who had the highest levels on both JI measures. These profile characteristics reveal that there is a complexity to perceptions of JI, which can be illustrated through a person-oriented approach.
Despite differing in their prevalence, the profiles identified here suggest that reports of JI may involve a complex interplay of perceptions of one's employment situation even in a fairly homogeneous sample of gainfully employed individuals. As regards demographics, there were no consistent variations between profiles in the percentages of women and men or family situation (partner, children, household size). For age, the Secure (Profile 4) were older than the Secure; quality concerned (Profile 1). However, it is difficult to say whether a small difference of approximately one year is meaningful from a real-life perspective. | Job insecurity profiles and work-home interference Job insecurity profile contributed to some variation in workhome interference. Secure individuals (Profile 4) tended to report less conflict than other profiles of JI. In contrast, Insecure employees (Profile 3) may be at a greater risk for conflict, especially WFC if preoccupation with avoiding redundancy causes physical and/or psychological absence in one's family role (as suggested by Voydanoff 11 ). Indeed, the Insecure (Profile 3) reported the highest WFC, which follows previous findings. 43 However, post hoc tests showed no consistent profile differences in WFC, which may relate to all working full-time and having children. As for FWC, the Secure (Profile 4) had lower levels than the Insecure: employment-concerned (Profile 2). Despite statistical significance, this should probably be considered more of a trend (P >.05). Still, the result may relate to strain-based psychological spillover among the Insecure: employment-concerned (Profile 2) due to concerns about their employment. This aligns with research 44 suggesting that conflict in one domain can be an outcome of strain in another. For instance, the Insecure: employment-concerned (Profile 2) may reduce their participation in the family domain and over-prioritize work in attempting to prevent actual job loss. WFC may also arise if the family's economic situation and well-being is threatened by job loss. 11 However, future studies with indepth information on WHI from individuals with more diverse family situations than those who responded here are needed to further our understanding of these mechanisms especially regarding the role that having a partner, living with children, or being single may play. | Job insecurity type and overall selfrated health Job insecurity profile accounted for a considerable amount of variation in self-rated health, suggesting that individuals who perceive some degree of JI may be more likely to rate their overall health more poorly in comparison to individuals who are not worried about their job security. Specifically, the Secure profile (Profile 4) had better overall health than the others. The Insecure profile (Profile 3) reported the lowest overall health scores of all, which is consistent with the metaanalytic findings of JI and health. 19 The finding that Secure; quality-concerned (Profile 1) and Insecure: employmentconcerned (Profile 2) profiles did not differ significantly in terms of self-rated health may be explained by equally high salience of different identity centralities. For instance, for the Secure; quality-concerned (Profile 1), it may be their work-centric identity, whereas for the Insecure: employmentconcerned (Profile 2), it may be their family-centric identity. 
This can be paralleled to earlier findings 45 showing that job insecure respondents were more likely to report a weaker social identity as an employed person compared with job secure respondents. Furthermore, the social identity of being an employed person influenced well-being and mediated the effect of JI on well-being over time. In other words, despite the fact that these JI profiles may account for different patterns of experiences, it does not necessarily mean that one profile contributes to poorer overall health than the other. Thus, this finding is somewhat consistent with variable-oriented results of quantitative and qualitative effects. 24 | Job insecurity type and psychological well-being The Secure profile (Profile 4) exhibited the highest scores of positive psychological functioning in comparison to other profiles. Further, the Insecure profile (Profile 3) exhibited the lowest scores of psychological well-being, which may be explained by Jahoda's latent deprivation model. 23 Considering that the Insecure profile (Profile 3) is characterized by high concern for all aspects of the job, the anticipation of losing both manifest and latent benefits of employment is likely an adverse contributor to impaired psychological functioning. 23 However, there were no significant differences between the Secure; quality-concerned (Profile 1) and Insecure: employment-concerned (Profile 2) profiles. This is consistent with the previous research findings, 46 where the threat of losing manifest or latent benefits both contribute significantly to poor psychological well-being. A somewhat unexpected finding was that there was no significant difference between Insecure: employment-concerned (Profile 2) and Insecure (Profile 3) profiles in psychological well-being. It would be reasonable to assume that the Insecure profile (Profile 3) would score significantly lower due to the combined effects of high quantitative and qualitative insecurity. Not finding such a difference may relate to the study's context: meta-analytical results 47 have shown that the level of social security in the country of data collection may be important for the relationship between JI and performance outcomes. The argument-that reactions to a threat of job loss may be less severe in a country with a strong social security system because employees expect assistance from society to handle the consequences-might also be applicable to health-related outcomes. The present study was conducted in Sweden, where the social security system is relatively extensive compared with many other countries. 48 A relevant future line of research would involve investigating the importance of the welfare context for well-being outcomes. | Limitations The decision to exclude individuals reporting 100 or more days of sick leave was made to ensure that the respondents were working at the time of the survey. Some may have been employed but not working regularly due to ill-health; thus, memory effects would have biased their responses. This means that individuals with poorer health were excluded, a bias known as the healthy worker effect. Thus, the findings probably underestimate health variations in conjunction with perceived JI. Similarly, our study targeted full-time workers, thus excluding part-time workers who often have a more vulnerable position and higher levels of job insecurity than fulltime workers. Those on extended sick-leaves and part-time workers were invited accidentally since updating of Swedish registers lags behind. 
Despite this, it should also be noted that the 4-profile solution was even more clear-cut when analyzing the full sample (including respondents working less than 35 hours and those on sick-leave; results not shown). Obviously, extended sick-leaves or employment conditions (including part-time work) are important for job insecurity perceptions. Thus, researching groups with more objectively insecure positions will probably produce profiles and prevalence figures that are different from those of full-time workers. Regarding the study design: this study was crosssectional which limits conclusions about causality. 49 Thus, future studies should ideally include longitudinal designs and strive for representative sampling across different groups, including part-time workers and individuals with health problems hindering full-time work. | Conclusions These findings show that variation of complex perceptions of JI can exist even within a somewhat homogenous sample of relatively privileged individuals who are well-educated and work full-time. The identification of specific JI patterns and what they mean for experiences of work-home interference, health, and psychological well-being contribute to further the understanding of how perceived JI can manifest, which may be important for health-related consequences of individuals both within and outside the professional domain as well as for organizational productivity. In society, social welfare systems may alleviate some of the negative consequences that individuals who perceive JI might incur, not only with respect to unemployment, but also with respect to their health and well-being.
package com.questnr.requests;

// Simple request body wrapper for endpoints that accept a username.
public class UsernameRequest {

    private String username;

    public String getUsername() {
        return username;
    }

    // Usernames are normalized to lower case on assignment.
    public void setUsername(String username) {
        this.username = username.toLowerCase();
    }
}
Services include medical, surgical, obstetrics, orthopaedics, emergency, day surgery, intensive care, coronary care, rehabilitation, oncology, sleep studies, interventional cardiac and mental health. The cardiac and vascular catheter laboratory helps the people living in Ballarat and surrounding regional areas with diagnosis and treatment of heart, stroke and vascular disease. The Emergency Department is a purpose built facility with 13 treatment bays for the management of acute illness and injury and is open 24 hours a day, seven days a week. Specialist staff, no waiting times, the latest medical technology and the facilities of the hospital combine to ensure critical care of the highest standard for everyone. The Emergency Department is able to cater for most emergency and trauma situations including cardiac care. We exceed best practice for time from diagnosis to catheter laboratory treatment. The Inpatient Rehabilitation Unit provides cardiac, reconditioning, orthopaedic programs and therapy facilities, including a hydrotherapy pool. The unit builds on and complements existing clinical services within the hospital. the community with renewed confidence and hope. The Outpatient Rehabilitation Unit provides a multidisciplinary service to help promote and maintain functional independence following cardiac, orthopaedic and joint surgery, stroke, and the resultant loss of condition that may accompany medical or surgical conditions. The multidisciplinary team comprises a rehabilitation physician, exercise physiologists, physiotherapist, occupational therapist, rehabilitation nurse, oncology rehabilitation nurse, speech therapist, dietician, and psychologist. Our day services offer the highest quality specialised care including surgery, cardiology, endoscopy and chemotherapy. There are no waiting lists and a full complement of hospital services is available if required. Accommodation in Rotary House onsite is available for patients and family travelling from regional areas. Our Maternity Unit provides inclusive compassionate health care, and a wide range of excellent support services. Women and babies are cared for by our dedicated midwives, obstetricians and pediatricians and provide family centered care to meet individual needs. Prospective parents are welcome to tour all areas of the maternity ward, where they can meet our team and view the hospital’s modern facilities. Our antenatal midwife will discuss the parents needs, offer childbirth and parenting education classes and answer all your questions and provide a free birth information kit.
package uk.ac.susx.tag.norconex.crawlstore;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.Serializable;

/**
 * A single set of stats for a single url.
 * Used to estimate the interval that should be left until the next crawl.
 * @author jp242
 */
public class ContinuousMetadata implements Serializable {

    // Logger scoped to this class (the original referenced ContinuousRecrawlableResolver by mistake).
    protected static final Logger logger = LoggerFactory.getLogger(ContinuousMetadata.class);

    private long checkedCount; // how many times the URL has been checked
    private long changeCount;  // how many of those checks detected a change
    private long nextCrawl;    // timestamp of the next scheduled crawl

    public ContinuousMetadata() {
        checkedCount = 1;
        changeCount = 0;
        nextCrawl = 0;
    }

    public void incrementChangeCount() {
        changeCount++;
    }

    public void incrementCheckedCount() {
        checkedCount++;
    }

    public long getCheckedCount() {
        return checkedCount;
    }

    public long getChangeCount() {
        return changeCount;
    }

    public long getNextCrawl() {
        return nextCrawl;
    }

    // Only ever moves the next-crawl time forward.
    public void setNextCrawl(long crawl) {
        if (crawl > nextCrawl) {
            nextCrawl = crawl;
        }
    }
}
#include <bits/stdc++.h>
using namespace std;

// Reads one comma-separated line from stdin and prints the integer tokens
// separated by spaces, e.g. "1,2,3" -> "1 2 3".
int main() {
    string S, T;
    getline(cin, S);

    stringstream X(S);
    while (getline(X, T, ',')) {   // split on commas
        cout << stoi(T) << " ";    // convert each token to an int
    }
    cout << endl;
    return 0;
}
Perceptions of a Quality Program and Relationships with Work Perceptions and Job Attitudes Numerous organizations have incorporated quality-oriented interventions to improve effectiveness; however, little systematic research has concerned how employees' perceptions about such interventions influence perceptions of work characteristics or job-related attitudes. The present study of 113 professional and support staff in a large petrochemical company suggested that perceptions about management's commitment to the success of the intervention was related to perceptions of reduced role stress, more positive relationships with management and more positive job-related attitudes.
Outcomes of deep sedation for catheter ablation of paroxysmal supraventricular tachycardia, with adaptive servo ventilation Abstract Background Catheter ablation for paroxysmal supraventricular tachycardia (PSVT) is an established treatment, but the effect of deep sedation on PSVT inducibility remains unclear. Aim We sought to examine PSVT inducibility and outcomes of catheter ablation under deep sedation using adaptive servo ventilation (ASV). Methods We retrospectively evaluated consecutive patients who underwent catheter ablation for PSVT under deep sedation (Propofol + Dexmedetomidine) with use of ASV. Anesthetic depth was controlled with BIS™ monitoring, and phenylephrine was administered to prevent anesthesiainduced hypotension. PSVT induction was attempted in all patients using extrastimuli at baseline, and after isoproterenol (ISP) infusion when necessary. Results PSVT was successfully induced in 145 of 147 patients, although ISP infusion was required in the majority (89%). The PSVT was atrioventricular nodal reentrant tachycardia (AVNRT) in 77 (53%), atrioventricular reciprocating tachycardia (AVRT) in 51 (35%), and atrial tachycardia (AT) in 17 (12%). A higher ISP dose was required for AT compared to other PSVT (AVNRT: 0.06 (IQR 0.030.06) vs AVRT: 0.03 (0.020.06) vs AT: 0.06 (0.030.12) mg/h, P =.013). More than half (51%) of the patients developed hypotension requiring phenylephrine; these patients were older. Acute success was obtained in 99% (patients with AVNRT had endpoints with single echo on ISP in 46%). Longterm success rate was 136 of 144 (94%) (AVNRT 96%, AVRT 92%, and AT 93%). There were no complications related to deep sedation. Conclusions Deep sedation with use of ASV is a feasible anesthesia strategy for catheter ablation of PSVT with good longterm outcome. PSVT remains inducible if ISP is used. | BACKG ROU N D Catheter ablation is increasingly being utilized as first-line therapy for treating paroxysmal supraventricular tachycardia (PSVT). While for atrial fibrillation (AF) ablation, deep sedation and general anesthesia are commonly used for the sake of patient comfort and improved catheter manipulation, 1 for PSVT ablation, minimal sedation is usually used. Effect of deep sedation on tachycardia inducibility during PSVT ablation is unknown, and optimal endpoint, long-term outcomes are not well described. Propofol is often used during deep sedation for its ease of anesthesia depth adjustment, but can lead to severe respiratory inhibition. 2 Dexmedetomidine is a safe anesthetic agent with minimal suppression of spontaneous breathing, 3 but its weakness is lack of immediate anesthesia depth control owing to its extended half-life. Recently, combined dexmedetomidine and propofol use was reported to result in better anesthesia depth control with fewer sedation-related adverse events during MRI. 4 Adaptive servo ventilation (ASV) is a respiratory support system which is often used in sleep apnea syndrome. It was recently applied to pulmonary vein isolation procedures, but its utility for PSVT ablation has not been studied. 5 We sought to evaluate inducibility, optimal endpoint, and longterm outcomes in catheter ablation of PSVT under deep sedation using propofol and dexmedetomidine with respiratory support by ASV. | Study population We evaluated consecutive patients who underwent first-time catheter ablation for PSVT from 2013 to 2018 under deep sedation (propofol + dexmedetomidine) with use of ASV. 
For accurate evaluation of PSVT inducibility, patients without documented ECG tracings of their tachycardia before the procedure, patients with PSVT coincidentally induced during AF ablation, and patients with prior AF ablation were excluded from this study. Patients were enrolled by reviewing electrophysiological study (EPS) summary and chart records. Intracardiac electrogram data during EPS/ablation were re-analyzed offline. All patients signed a written informed consent for the ablation procedure. This study was approved by the Kameda Medical Center Review Board. | Anesthesia and electrophysiological study Antiarrhythmic drugs were discontinued more than 5 half-lives before the ablation procedure. Personnel for each electrophysiology (EP) study consisted of fully trained electrophysiologists, EP fellows, and EP nurses, all of whom were trained in airway management and administration of anesthetic drugs. Patients were continuously monitored by pulse oximetry. if patients had anesthesia-induced glossoptosis. End-tidal CO2 was routinely used to monitor ventilation. Electrocardiograms were recorded throughout the procedure, and arterial blood pressure was measured invasively using an Arterial catheter mini kit™ (20 ga x 6 inch; Argon Medical). Sedation was administered by a trained EP nurse under supervision of the operating physician. After an initial bolus administration of fentanyl citrate and propofol, continuous administration of propofol and dexmedetomidine was started. Dexmedetomidine was administered with an initial high dose of 6 μg/kg/h over 10 minutes, followed by a maintenance dose of 0.6 μg/kg/h. Anesthetic depth was controlled with BIS™ monitoring (A-3000 BIS XP Platform™; Aspect Medical System, Natick, MA, USA), which was adjusted to around 40 to achieve a Ramsay sedation scale of 6. Local anesthesia was administered after the initiation of intravenous anesthesia, and the catheter sheaths were inserted from the right femoral vein and right subclavian vein. According to the pain and consciousness of the patients, additional fentanyl citrate was administered and propofol was titrated with attention to hypotension and respiratory depression. Electrode catheters were placed at the right ventricular apex, His region, and high right atrium (RA) via the right femoral vein, and another was placed at the coronary sinus via the right subclavian vein. Phenylephrine was administered to prevent anesthesia-induced hypotension, and systolic blood pressure was kept at >80 mmHg. After measuring the effective refractory period and Wenckebach rate in both atrioventricular (AV) conduction and ventriculoatrial (VA) conduction, PSVT induction was attempted using extrastimuli up to double extrastimuli and/or burst pacing from atrium/ventricle at baseline, including in patients with manifest WPW syndrome. If PSVT was not inducible at baseline, then isoproterenol (ISP) was administered targeting a heart rate (HR) increase of 10%, and PSVT induction was attempted again. If PSVT was still not inducible, the dose of ISP was increased incrementally and induction attempted at the new dose. This process was repeated until sustained PSVT was induced.
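As a small worked example of the weight-based dosing arithmetic in this protocol (my own illustration; the patient weight and the resulting numbers are not study data):

# Loading 6 ug/kg/h for 10 minutes, then 0.6 ug/kg/h maintenance, as described above.
def dexmedetomidine_rates(weight_kg: float) -> dict:
    loading_rate = 6.0 * weight_kg               # ug/h during the 10-minute loading phase
    loading_dose = loading_rate * (10 / 60)      # ug actually delivered over those 10 minutes
    maintenance_rate = 0.6 * weight_kg           # ug/h thereafter
    return {"loading_ug_per_h": loading_rate,
            "loading_dose_ug": loading_dose,
            "maintenance_ug_per_h": maintenance_rate}

# e.g. a 60 kg patient: 360 ug/h for 10 min (60 ug total), then 36 ug/h
print(dexmedetomidine_rates(60.0))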
PSVT was categorized into 3 groups: atrioventricular node reentrant tachycardia (AVNRT), atrioventricular reciprocating tachycardia (AVRT), and atrial tachycardia (AT), as defined by others. We diagnosed the mechanism of AT as re-entry when any of the following criteria were fulfilled: (i) AT could be reproducibly initiated and terminated with programmed stimulation; (ii) fulfillment of the criteria for manifest and concealed entrainment; (iii) reentry circuit shown by 3D activation mapping, and differentiated from other AT mechanisms (abnormal automaticity and triggered activity). 10 EPS and ablation were performed using an electroanatomical mapping system (Ensite™). | Ablation strategy and success Fentanyl citrate 0.05 mg was routinely administered before start of radiofrequency (RF) application, and was also administered when necessary for pain control according to the patient's body weight. Propofol administration was also adjusted to control pain and to maintain adequate BIS score. Slow pathway ablation was performed for slow/fast AVNRT using non-irrigated RF catheter typically started at 20 W, and increased to a maximum 30W with target temperature of maximum 55°C. For slow/slow and fast/slow AVNRT, slow pathway ablation was performed first, and then additional ablation of the earliest atrial VA conduction site via slow pathway was performed if tachycardia was still inducible. During slow pathway ablation, RF application was continued until appearance of junctional beats, and repeat induction was performed permitting one single echo under ISP. If 2 or more echo beats or tachycardia induction remained, RF application was repeated after discontinuing ISP infusion, and tachycardia induction was performed again after resumption of ISP infusion. As for AVRT, accessory pathway ablation was performed after the induction of tachycardia. A supravalvular approach was used for left-side accessory pathways (AP) via transseptal approach first, and then subsequently a subvalvular approach via transaortic approach was tried for difficult cases. After elimination of AP, non-inducibility of the tachycardia was confirmed. In patients with AT, the earliest activation site indicated by electroanatomical mapping was targeted for ablation. As for perinodal ATP-sensitive AT of which the mechanism is considered to be macro-reentry, ablation was tried at the reentrant site outside of Koch's triangle first, and then the earliest activation site was targeted. 11,12 Either of non-irrigated or irrigated catheters were used in AVRT and AT ablation by physician choice, with maximum power up to 35 W. Ablation was deferred if the target site was in close proximity (≤5 mm) to the His region. After successful ablation of PSVT, reinduction of tachycardia was attempted using the same amount of ISP that had been necessary for induction before ablation. After the procedure, antiarrhythmic drugs for PSVT were discontinued excluding beta blockers when they were being prescribed for hypertension. | Follow-up Patients were evaluated in the outpatient setting at 3-6 weeks after ablation and subsequently at 3-to 6-month intervals if possible. Holter monitoring (24-hour duration) was performed if they had palpitations or symptoms which suggested recurrence of tachycardia. After several follow-up visits to our hospital, patients were sent back to referring cardiologists/physicians if they had no symptoms. Patients were referred back to our hospital if the same symptoms recurred or when recurrence of the tachycardia was suspected. 
| Statistical analysis All values are expressed as median with interquartile range. Comparisons of continuous variables were analyzed with one-way ANOVA. Categorical variables, expressed as numbers and percentages, were compared between groups with Pearson's chi-squared test, and two-group comparisons were performed with Fisher's exact test. HR change was validated with Student's t test. Statistical significance was defined as P <.05. JMP® 14.3 (SAS Institute Inc, Cary, NC, USA) was used for analysis. | Population and PSVT category Of 153 patients who underwent first-time EPS/ABL for PSVT from July 2013 to September 2018, 6 patients were excluded (no documented tachycardia tracing before the procedure in 2, ASV not used in 3, and data missing in 1), leading to inclusion of 147 patients in this study (76 women (52%), age 62). Of these, 145 patients (99%) had successful tachycardia induction. The remaining 2 patients only had induction of single- or two-echo beats by programmed stimulation, even with ISP. In one of those two patients, tachycardia induction was also attempted after discontinuation of deep sedation, but was unsuccessful. Of the 145 patients who had successful tachycardia induction, catheter ablation was deferred in two patients because of the risk for cardiac conduction system injury; one had junctional tachycardia, and in the other the AT originated close to the sinus node. In total, 144 patients underwent catheter ablation, with the inclusion of one of the two patients in whom we failed to induce sustained PSVT (Figure 1). This particular patient had induction of two-echo beats, had no accessory pathways, had documented short RP' tachycardia on the preprocedure 12-lead ECG, and was diagnosed as having slow/fast AVNRT. PSVT was diagnosed as AVNRT in 77 (54%), AVRT in 51 (35%), and AT in 17 (11%). AVNRT was diagnosed after using entrainment pacing from the ventricle (ventricular burst pacing and/or ventricular pacing simultaneous with His bundle activation). AT patients were more likely older and female, and had lower EF and a higher incidence of diabetes mellitus, dyslipidemia, and heart failure (Table 1). Vital signs and the amount of ISP/phenylephrine administered are given in Table 3 and Table S1. [Figure 2: Heart rate (HR) changes owing to sedation and isoproterenol (ISP). After the initiation of sedation, average HR decreased from 70 to 54 bpm. Patients whose tachycardia could not be induced received ISP, and the average HR had increased to 91 bpm at the time of PSVT induction. Note that average HR at PSVT induction is higher than that before sedation.] [Table 2: Clinical characteristics of the patients with or without phenylephrine administration.] A higher dose of ISP was used for the reinduction of tachycardia after ablation in patients with AT compared to those with AVNRT or AVRT (Table 4). The 78 patients with AVNRT who received catheter ablation were classified into 3 subtypes: slow/fast AVNRT in 59, fast/slow AVNRT in 12, and slow/slow AVNRT in 7 (Table 5). | Major findings Our major findings were as follows: Deep sedation with use of ASV is a feasible anesthesia strategy for catheter ablation of PSVT regardless of its subtype (AVNRT, AVRT, and AT). Inducibility of the PSVT remains preserved with the use of ISP. The required dose of ISP was higher in patients with the AT subtype. In patients with AVNRT, "single echo beat by extra stimulation under ISP" is a feasible ablation endpoint even under deep sedation, as it is for conventional minimal sedation.
| Need of deep sedation for PSVT ablation Currently, PSVT ablation is often performed under local anesthesia and minimal sedation with an oxygen mask or nasal cannula. 13 This conventional method is safe, and inducibility of the tachycardia is preserved. However, patients do experience pain during the groin puncture and RF application, and also palpitations during pacing and induced tachycardia, throughout a procedure that can last several hours. Indeed, EPS procedures produce anxiety, pain, and discomfort in more than 50% of patients. 13 If tachycardia inducibility is preserved and results in good outcomes, deep sedation should be given as an option to patients for PSVT ablation, as it often is for AF ablation in recent years. Furthermore, we believe that steady anesthesia in the catheter laboratory has the advantage of preventing medication error. Our results expand the options for anesthesia strategy during PSVT ablation beyond minimal sedation, and we expect it to lead to greater patient satisfaction. 2 | Induction during deep sedation We used a combination of sedation drugs in this study: dexmedetomidine, which suppresses spontaneous breathing less, and propofol, which has fast anesthesia depth control. Although dexmedetomidine has negative cardiovascular effects such as bradycardia as well as hypotension, its clinical feasibility for PSVT ablation has been previously demonstrated. Propofol is supposed to have less negative influence on the cardiac conduction system, and can be a favorable anesthetic agent in PSVT ablation. 24,25 In our study, the combination of dexmedetomidine and propofol enabled us to achieve both preserved inducibility and a safe procedure without complications related to deep sedation. 19 The utility of propofol in catheter ablation for AVNRT has been described in the past, 24,26 but we evaluated the inducibility and the outcome not only for AVNRT but also for other types of PSVT, and under deep sedation with ASV instead of minimal sedation. | Ablation success Both acute and long-term success rates were comparable to studies using other anesthetic modalities. 27 In the patients with AVNRT, "single echo beat by extra stimulation under ISP" is a feasible ablation endpoint under deep sedation as well as conventional minimal sedation. Also, in the patients with AVRT or AT, disappearance of the accessory pathway and the elimination of tachycardia with confirmed non-inducibility by pacing stimulation under ISP is a compatible ablation endpoint in deep sedation. It may be that deep sedation is associated with satisfactory long-term outcome owing to the increased ease of catheter manipulation; the same concept has been reported in AF ablation. 5 To prove this, however, prospective evaluation comparing outcomes to minimal sedation is needed. | LIMITATIONS This is a retrospective, single-center, observational study. Additionally, not all patients were followed at our institution after undergoing the ablation procedure and, in those patients, we had to rely on the information gathered from the referring healthcare providers for arrhythmia outcomes. The ISP dose for inducing PSVT and the ablation endpoint in AVNRT were left to the physicians. Cardiac function was preserved in our population (average EF 68%), so we were not able to ascertain whether this anesthesia strategy, requiring vasopressor support and high doses of isoproterenol, could also be applied to patients with severe LV dysfunction.
| CONCLUSION Deep sedation with use of ASV is a feasible anesthesia strategy for catheter ablation of PSVT with good long-term outcome. PSVT inducibility remains preserved when ISP is used. The required ISP dose differs based on PSVT type. In patients with AVNRT, "single echo beat by programmed stimulation under ISP" is a feasible ablation endpoint even under deep sedation, as it is for conventional minimal sedation. DISCLOSURE The authors declare no conflict of interest for this article. The protocol for this research project has been approved by a suitably constituted Ethics Committee of the institution and it con-
Also known as the water bear, the tardigrade has a lot to be proud of — this tiny organism is nigh-indestructible, known to have survived in extreme temperatures ( -272C to +151C / -457.6F to 303.8F) and to be the only animal that can brave the vacuum of space unprotected and live to tell the tale. A team from the University of North Carolina at Chapel Hill, curious as to how the tardigrade can accomplish such incredible feats, sequenced the genome of the microorganism. Their paper, published in the journal PNAS, reveals that a huge chunk of its DNA is of foreign origin — nearly 17.5% of the water bear’s genome (some 6000 genes) are primarily of bacterial origin, though genes from fungi and plants have also been identified. Defined as the shifting of genetic material materially between organisms, horizontal gene transfer is widespread in the microscopic world. The process occurs in humans too but in a limited fashion, and via transposons and viruses. Microscopic animals however are known to have large complements of foreign genes. Until today, the rotifer held the title for ” the greatest complement of foreign DNA of any microscopic organism,” but the newly-sequenced tardigrade genome includes twice as many genes as those boasted by the rotifer. And the authors have a theory as to why this extremely extensive gene transfer may have occurred. Tardigrades have long been known to undergo and survive the process of desiccation (extreme drying out). The authors believe that this process is extremely harsh on the tardigrade’s genome, with strands of DNA suffering significant sheering and breakage, causing a general loss of integrity and leakiness of the water bear’s nucleus. This may allow foreign genetic material to easily exploit such gaps in the genome and integrate themselves, similar to the gene-transfer procedure known as electroportation. For now, the tardigrade has a dual claim to fame, being the only known animal to survive the vacuum of space, and being the animal with the largest genetic complement. Not bad for a 1.5mm long bug.
from typing import Optional, Union, List

from .decoder import MAnetDecoder
from ..encoders import get_encoder
from ..base import SegmentationModel
from ..base import SegmentationHead, ClassificationHead


class MAnet(SegmentationModel):
    """MAnet_ : Multi-scale Attention Net.

    The MA-Net can capture rich contextual dependencies based on the attention mechanism,
    using two blocks:
     - Position-wise Attention Block (PAB), which captures the spatial dependencies between
       pixels in a global view
     - Multi-scale Fusion Attention Block (MFAB), which captures the channel dependencies
       between any feature map by multi-scale semantic feature fusion

    Args:
        encoder_name: Name of the classification model that will be used as an encoder
            (a.k.a backbone) to extract features of different spatial resolution
        encoder_depth: A number of stages used in encoder in range [3, 5]. Each stage generate
            features two times smaller in spatial dimensions than previous one (e.g. for depth 0
            we will have features with shapes [(N, C, H, W),], for depth 1 -
            [(N, C, H, W), (N, C, H // 2, W // 2)] and so on). Default is 5
        encoder_weights: One of **None** (random initialization), **"imagenet"** (pre-training
            on ImageNet) and other pretrained weights (see table with available weights for
            each encoder_name)
        decoder_channels: List of integers which specify **in_channels** parameter for
            convolutions used in decoder. Length of the list should be the same as **encoder_depth**
        decoder_use_batchnorm: If **True**, BatchNorm2d layer between Conv2D and Activation
            layers is used. If **"inplace"** InplaceABN will be used, allows to decrease memory
            consumption. Available options are **True, False, "inplace"**
        decoder_pab_channels: A number of channels for PAB module in decoder. Default is 64.
        in_channels: A number of input channels for the model, default is 3 (RGB images)
        classes: A number of classes for output mask (or you can think as a number of channels
            of output mask)
        activation: An activation function to apply after the final convolution layer. Available
            options are **"sigmoid"**, **"softmax"**, **"logsoftmax"**, **"tanh"**, **"identity"**,
            **callable** and **None**. Default is **None**
        aux_params: Dictionary with parameters of the auxiliary output (classification head).
            Auxiliary output is build on top of encoder if **aux_params** is not **None** (default).
            Supported params:
                - classes (int): A number of classes
                - pooling (str): One of "max", "avg". Default is "avg"
                - dropout (float): Dropout factor in [0, 1)
                - activation (str): An activation function to apply "sigmoid"/"softmax"
                  (could be **None** to return logits)

    Returns:
        ``torch.nn.Module``: **MAnet**

    .. _MAnet:
        https://ieeexplore.ieee.org/abstract/document/9201310
    """

    def __init__(
        self,
        encoder_name: str = "resnet34",
        encoder_depth: int = 5,
        encoder_weights: Optional[str] = "imagenet",
        decoder_use_batchnorm: bool = True,
        decoder_channels: List[int] = (256, 128, 64, 32, 16),
        decoder_pab_channels: int = 64,
        in_channels: int = 3,
        classes: int = 1,
        dropout: Optional[float] = None,
        activation: Optional[Union[str, callable]] = None,
        aux_params: Optional[dict] = None,
    ):
        super().__init__()

        # Encoder (backbone) producing multi-scale feature maps
        self.encoder = get_encoder(
            encoder_name,
            in_channels=in_channels,
            depth=encoder_depth,
            weights=encoder_weights,
        )

        # Decoder built from PAB/MFAB attention blocks
        self.decoder = MAnetDecoder(
            encoder_channels=self.encoder.out_channels,
            decoder_channels=decoder_channels,
            n_blocks=encoder_depth,
            use_batchnorm=decoder_use_batchnorm,
            pab_channels=decoder_pab_channels,
        )

        # Final 3x3 convolution mapping decoder features to class channels
        self.segmentation_head = SegmentationHead(
            in_channels=decoder_channels[-1],
            out_channels=classes,
            activation=activation,
            kernel_size=3,
            dropout=dropout,
        )

        # Optional image-level classification head on top of the deepest encoder features
        if aux_params is not None:
            self.classification_head = ClassificationHead(
                in_channels=self.encoder.out_channels[-1], **aux_params
            )
        else:
            self.classification_head = None

        self.name = "manet-{}".format(encoder_name)
        self.initialize()
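A short usage sketch follows (it assumes the file lives in its original segmentation-models-style package so the relative imports resolve; encoder_weights=None avoids a pretrained-weight download):

# Usage sketch only; the import path is hypothetical and depends on the package layout.
import torch
# from the_package import MAnet   # hypothetical import

model = MAnet(encoder_name="resnet34", encoder_weights=None, classes=3, dropout=0.2)
model.eval()
with torch.no_grad():
    x = torch.randn(1, 3, 256, 256)   # one RGB image; spatial size divisible by 32 for depth 5
    mask = model(x)                   # expected shape: (1, 3, 256, 256)
print(mask.shape)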
A comprehensive differential proteomic study of nitrate deprivation in Arabidopsis reveals complex regulatory networks of plant nitrogen responses. Nitrogen (N) is an important nutrient and signal for plant growth and development. However, to date, our knowledge of how plants sense and transduce the N signals is very limited. To better understand the molecular mechanisms of plant N responses, we took two-dimensional gel-based proteomic and phosphoproteomic approaches to profile the proteins with abundance and phosphorylation state changes during nitrate deprivation and recovery in the model plant Arabidopsis thaliana. After 7-day-old seedlings were N-deprived for up to 48 h followed by 24 h recovery, a total of 170 and 38 proteins were identified with significant changes in abundance and phosphorylation state, respectively. Bioinformatic analyses implicate these proteins in diverse cellular processes including N and protein metabolisms, photosynthesis, cytoskeleton, redox homeostasis, and signal transduction. Functional studies of the selected nitrate-responsive proteins indicate that the proteasome regulatory subunit RPT5a and the cytoskeleton protein Tubulin alpha-6 (TUA6) play important roles in plant nitrate responses by regulating plant N use efficiency (NUE) and low nitrate-induced anthocyanin biosynthesis, respectively. In conclusion, our study provides novel insights into plant responses to nitrate at the proteome level, which are expected to be highly useful for dissecting the N response pathways in higher plants and for improving plant NUE.
Ray-tracing model for the stretching calculation of the new type of aberration-free stretcher Chirped pulse amplification(CPA) has become a common technique for the generation of ultrashort and high peak power femtosecond(fs) optical pulses. One key point of this technique is the reasonable design for the pulse stretcher. We improved the aberrationfree stretcher replacing the two spherical mirrors with two concentric rectangular focused cylindrical ones. The first mirror is convex, and the second one concave. Their radius of curvature ratio is two and of opposite sign. This combination cancels spherical aberration and astigmatism, and has no on-axis coma and exhibits no chromatic aberration. We calculated the group velocity dispersion(GVD) of the stretcher by ray-tracing model. Supposing that the distance between the grating and the center of curvature is a variable 1, we obtained the variety of the group delay and the GVD of the stretcher with the parameter 1 and the incidence wavelength A.For the input pulse, let pulsewidth to 9.8fs, the initial chirp C=O.75, the distance l=O.lm, we gained GVD was 2.5fs2/jm for the center wavelength 790nm, the stretching factor was 90197.4, that is, the seed pulse(9.8fs in duration) can be stretched to 884 PS
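The stretching factor above comes from the authors' ray-tracing model; as a generic, hedged point of comparison only, the textbook broadening formula for an initially unchirped Gaussian pulse under pure group-delay dispersion can be evaluated as follows (the GDD value is illustrative and is not taken from the paper):

# Generic Gaussian-pulse broadening under group-delay dispersion (GDD); this is
# not the paper's ray-tracing calculation and it ignores the initial chirp and
# higher-order dispersion.
import math

def stretched_fwhm(tau_in_fs: float, gdd_fs2: float) -> float:
    a = 4 * math.log(2) * gdd_fs2 / tau_in_fs**2
    return tau_in_fs * math.sqrt(1 + a**2)

tau_in = 9.8          # fs, as quoted for the seed pulse
gdd = 3.0e6           # fs^2, illustrative value only
print(stretched_fwhm(tau_in, gdd) / tau_in)   # stretching factor, roughly 8.7e4 here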
Effects of Pre- and Postharvest Calcium Treatments on Shelf Life and Postharvest Quality of Broccoli Microgreens We reported previously that the preharvest treatment of broccoli microgreens with 10 mmolL calcium chloride (CaCl2) increased the yield and postharvest quality. The objective of this study was to investigate whether other calcium forms have the similar effect, in particular, after postharvest dip in calcium solution. Our results are as follows: 1) Preharvest spray without postharvest dip: Both 20 mmolL calcium lactate (Ca lactate) and calcium amino acid (Ca AA) chelate significantly improved broccoli microgreens quality and inhibited microbial populations as compared with the wateronly control during storage at 5 8C for 21 days. However, they were less effective than 10 mmolL CaCl2. 2) Postharvest dip without preharvest spray: The microgreens sprayed with water-only control were dipped in 0, 25, 50, or 100 mmolL Ca lactate solution containing 100 mLL chlorine immediately after harvest. During storage at 5 8C for 14 days, 50mmolL Ca lactate dip showed the highest overall quality and lowest tissue electrolyte leakage. 3) Preharvest spray and postharvest dip: Combined preharvest 10 mmolL CaCl2 spray and postharvest 50 mmolL L1 Ca lactate dip resulted in better postharvest quality than individual preor postharvest calcium treatments. However, the preharvest 10 mmolL CaCl2 spray without postharvest dip displayed a best overall visual quality and longest storage life. Our data indicate that preand postharvest calcium treatments have positive effect on maintaining the microgreens quality and extending shelf life. However, current postharvest dip/spinning/drying method profoundly reduces the shelf life due to mechanical damages. Technologies to optimize microgreens wash are needed to provide ready-to-eat product. Alternatively, the wash step can be avoided when the microgreens are grown under controlled settings. Microgreens are cotyledonary-leafed seedlings harvested within 1020 d after vegetable seed germination. In recent years, growing microgreens have become a more common practice for urban farming because of their easiness to handle indoors, and their high nutritional value and sensory appeal (, 2014a; ). However, their commercial production and marketing is limited by their short shelf life due to rapid quality deterioration (Berba and Uchanski, 2012; ; Kou, et al., 2013). Furthermore, since fresh-cut products are marketed as ready-to-eat with no need for a sterilization or pasteurization step, wash is a critical process in the preparation of fresh-cut produce and is often the only step. Wash removes foreign materials as well as tissue fluids and reduces microbial populations. To provide more affordable ready-to-eatmicrogreens to a broader market, it is necessary to develop chlorine wash methods, which can maintain postharvest quality and extend shelf life. Calcium is important for plant growth and development by maintaining and modulating various cellular functions (; Palta, 1996). Calcium alters intracellular and extracellular processes, resulting in retarded ripening as exemplified by lower rates of color change, softening, and CO2 and ethylene production, increase in sugar, and reduction in total acid content (Conway, 1987; ; Raese and Drake, 1993). The preand postharvest application of calcium salts has been used successfully in many fresh fruits to reduce loss of firmness and slow down the ripening process (; ; ; ; ). CaCl2 has been primarily used for preharvest treatment. 
When it is used in fresh-cut products, it may cause a bitter aftertaste in foods (Bolin and Huxsoll, 1989). However, Ca lactate treatment does not show a negative effect on flavor. Therefore, Ca lactate has been suggested as a potential alternative firming additive for use in fresh-cut fruits (Luna-Guzmán and Barrett, 2000; Yang and Lawless, 2005). Martín-Diana et al. also compared the efficacies of Ca lactate and chlorine wash treatments of fresh-cut lettuce and carrots during storage at 4 °C over 10 d and found no significant differences between treatments. Ca AA chelate formulations represent another Ca source that has been used in the food and/or nutritional industries. Ca AA chelate is not corrosive to processing equipment and is more likely to penetrate deeply into plant tissues. A postharvest Ca AA dip maintained firmness and doubled the shelf life of intact honeydew fruit (Lester and Grusak, 2001, 2004). Currently, the potential for use of Ca lactate and Ca AA chelate in the microgreens industry has not been explored. Compared with fruits and mature green leaves, fresh-cut microgreens are very tender and subject to much more stress, leading to rapid senescence and a very short shelf life (Watkins and Nock, 2012). For example, the shelf life of broccoli microgreens (Brassica oleracea L. var. italica) is 7-10 d at 5 °C. However, if treated with 10 mmol·L-1 CaCl2 before harvest, the shelf life can be extended to 14-21 d due to stimulated superoxide dismutase and peroxidase activities, lowered tissue electrolyte leakage, improved overall visual quality, and reduced microbial growth during storage (Kou et al., 2014a). A chemical composition comparison showed that glucosinolates, a very important group of phytochemicals, were the major compounds enhanced by preharvest treatment with 10 mmol·L-1 CaCl2. This study compares the effects of preharvest spray with CaCl2, Ca lactate, or Ca AA chelate, and a postharvest dip in Ca lactate, on the quality and shelf life of broccoli microgreens.

Received for publication 18 Sept. 2015. Accepted for publication 13 Nov. 2015. We thank Ernie Paroczay for assistance in planting microgreens, and Ellen Turner and Frances Trouth for reviewing and revising the manuscript before its submission. Use of a company or product name by the U.S. Department of Agriculture does not imply approval or recommendation of the product to the exclusion of others that may also be suitable. Corresponding author. E-mail: tianbao.yang@ars.usda.gov.

Materials and Methods

Plant materials. Broccoli (Brassica oleracea var. italica) cultivar Arcadia seeds were purchased from Living Whole Foods, Inc. (Springville, UT). Hydroponic pads (20.8 × 25.4 cm; Growers Supply, Dyersville, IA) were made from biodegradable wood fibers. One hydroponic pad was set evenly in one 54 × 28 × 6 cm tray (vacuum-formed standard 1020 open flats without holes). The pad was soaked in 600 mL tap water (pH 5.5-6.0). The seeds (37.8 g) were spread evenly on the wet pad. The trays were kept in the dark in a growth chamber at 25 °C for the first 4 d, and then exposed to light at an irradiance of 42 µmol·s-1·m-2 with a 12 h/12 h (light/dark) photoperiod for the next 6 d.

Preharvest calcium treatments. The trays were sprayed once a day with 200 mL of H2O (tap water, pH 5.5-6.0) only; 1, 10, or 20 mmol·L-1 Ca AA (Albion Laboratories, Inc., Clearfield, UT); 1, 10, or 20 mmol·L-1 Ca lactate; or 10 mmol·L-1 CaCl2 (Sigma-Aldrich, Inc., St. Louis, MO) after sowing the seeds (Table 1).
Ten-day-old broccoli microgreens were harvested with a pair of sterilized scissors by cutting at the bottom of the hypocotyls. Damaged leaves were not used for analyses. The broccoli microgreens (10 g each) were packaged in sealed bags (10 cm × 10 cm) prepared from polyethylene films (Pacific Southwest Container Inc., Modesto, CA) with an oxygen transmission rate of 16.6 pmol·s-1·m-2·Pa-1. Samples were stored at 5 °C in the dark for 21 d, with quality evaluation performed on Days 0, 4, 7, 14, and 21.

Postharvest calcium treatments. Ten-day-old seedlings treated with water only during the preharvest period were moved to the washroom across the aisle. Dip solutions contained 0, 25, 50, or 100 mmol·L-1 Ca lactate plus 100 mL·L-1 chlorine (sodium hypochlorite, pH 6.5) (Table 1). The freshly cut microgreens (200 g) were placed in predisinfected mesh bags, washed in 40 L of dip solution with gentle agitation for 30 s at room temperature, then centrifuged at 300 rpm for 3 min with a commercial salad centrifugal dryer (model T-304, Garroute Spin Dryer; Meyer Machine Co., San Antonio, TX) to remove excess water. The washed microgreens were packaged in 10 g amounts in each bag and stored at 5 °C in the dark for 14 d. Evaluations were performed on Days 0, 4, 7, 11, and 14.

Pre-/postharvest calcium treatments. The following combinations of pre- and postharvest calcium treatments were performed (Table 1). H2O/Cl meant preharvest spray with water only and postharvest dip in 100 mL·L-1 chlorine solution; H2O/Ca lactate indicated preharvest water spray and postharvest dip in 50 mmol·L-1 Ca lactate plus 100 mL·L-1 chlorine solution; CaCl2/Cl stood for preharvest 10 mmol·L-1 CaCl2 spray and postharvest dip in 100 mL·L-1 chlorine solution; CaCl2/Ca lactate represented preharvest 10 mmol·L-1 CaCl2 spray and postharvest dip in 50 mmol·L-1 Ca lactate with 100 mL·L-1 chlorine solution. Microgreens (200 g) were placed in mesh bags, and then washed in 40 L of wash solution with gentle agitation for 30 s. The microgreens (in mesh bags) were centrifuged at 300 rpm for 3 min with a commercial salad centrifugal dryer (model T-304, Garroute Spin Dryer; Meyer Machine Co.) to remove excess water. The washed microgreens were packaged in 10 g amounts in each bag. Packaged microgreens were stored at 5 °C in the dark for 14 d, and evaluations were performed on Days 0, 4, 7, 11, and 14.

Fig. 1. Effects of preharvest spray with different calcium forms on (A) O2 and (B) CO2 partial pressure within packages of broccoli microgreens during 5 °C storage. Data presented are the means of four replications; vertical lines represent SEs.

Table 1. List of all the treatments (columns: Treatment, Preharvest spray, Postharvest dip).
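The treatment structure spelled out in the preceding paragraphs can be summarized compactly. The sketch below is a shorthand of ours rather than the authors' wording in Table 1; concentrations are those given in the text (mmol·L-1 for calcium salts, mL·L-1 for chlorine).

# Shorthand summary of the three experiments described above (illustrative labels).
PREHARVEST_SPRAYS = {            # sprayed daily, 200 mL per tray
    "H2O": [0],
    "Ca AA chelate": [1, 10, 20],
    "Ca lactate": [1, 10, 20],
    "CaCl2": [10],
}
POSTHARVEST_DIPS = {             # 30 s dip; every dip also contains 100 mL/L chlorine
    "Ca lactate": [0, 25, 50, 100],
}
COMBINED_TREATMENTS = [          # (preharvest spray, postharvest dip)
    ("H2O", "chlorine only"),
    ("H2O", "50 Ca lactate + chlorine"),
    ("10 CaCl2", "chlorine only"),
    ("10 CaCl2", "50 Ca lactate + chlorine"),
]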
Postharvest quality and microbiological assessment. The package atmospheres were measured immediately upon removal of the samples from storage. The CO2 and O2 in the headspace of packages containing microgreens were measured as described in Kou et al. (2014a). Overall visual quality was evaluated with a 9-point hedonic scale following a modified procedure from Luo et al. and Meilgaard et al., where 9, 8, 7, and 6 = like extremely, strongly, moderately, and slightly, respectively; 5 = neither like nor dislike; and 1, 2, 3, and 4 = dislike extremely, strongly, moderately, and slightly, respectively. All visual quality evaluations were carried out by three trained evaluators. All evaluators had over 5 years of research experience with fresh produce, especially in performing sensory evaluations of leafy greens. Before the beginning of this experiment, additional training specific to the organoleptic properties of broccoli microgreens was provided to the evaluators. The acceptable range for overall visual quality was considered to be a score of 6.0 or above (Kou et al., 2014b).

The electrical conductivity of the solution was measured using the same method as described in Kou et al. (2014a). Briefly, 3 g of microgreens was submerged in 150 mL deionized water at 20 °C and shaken for 30 min. The electrolyte content of the solution was measured using a conductivity meter (model 135A; Orion Research, Inc., Beverly, MA). Total electrolytes were obtained after freezing the samples at -20 °C for 24 h and subsequently thawing them, and tissue electrolyte leakage was expressed as a percentage of the total electrolytes.

Microbial growth on broccoli microgreens was assayed following a procedure from Luo et al. with some modifications. Each 3 g sample of microgreens was macerated in 27 mL phosphate-buffered saline, using a model 80 Laboratory Stomacher (Seward Medical, London, UK) for 2 min at high speed in filtered stomacher bags. A 50-µL sample of each filtrate or its appropriate dilution was logarithmically spread on agar plates with an automated spiral plater (Wasp II; Don Whitley Scientific Ltd., West Yorkshire, UK). Enumeration of microorganisms was performed using the following culture media and conditions: 1) tryptic soy agar (Difco Laboratory, Sparks, MD) incubated at 28 °C for 24 h for the enumeration of total aerobic mesophilic bacteria (AMB), and 2) potato dextrose agar (Difco Laboratory) supplemented with 200 µg·mL-1 chloramphenicol incubated at room temperature (22 °C) for 48 h for the enumeration of yeasts and molds (Y&M). Microbial colonies were counted using a ProtoCOL Colony Counter 50000 (Synoptics, Cambridge, UK) and reported as Log cfu/g (Log colony-forming units per gram of tissue).
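Two derived quantities used throughout the evaluation, tissue electrolyte leakage and the microbial counts, reduce to simple arithmetic. A minimal sketch follows; the function and variable names are ours, not the authors', and the plate-count conversion is the generic form (the spiral-plater software applies its own counting rules).

import math

def electrolyte_leakage_percent(conductivity_before_freeze, conductivity_after_freeze_thaw):
    """Tissue electrolyte leakage as a percentage of total electrolytes
    (both readings in the same units, e.g. microS/cm)."""
    return 100.0 * conductivity_before_freeze / conductivity_after_freeze_thaw

def log_cfu_per_gram(colony_count, dilution_factor, plated_volume_ml,
                     sample_g=3.0, diluent_ml=27.0):
    """Convert a plate count to log10 CFU per gram of tissue.
    3 g tissue in 27 mL buffer gives a 1:10 (w/v) homogenate."""
    cfu_per_ml = colony_count * dilution_factor / plated_volume_ml
    cfu_per_g = cfu_per_ml * (sample_g + diluent_ml) / sample_g
    return math.log10(cfu_per_g)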
Experimental design and statistical analysis. Package atmospheres, tissue electrolyte leakage, and microbial data were analyzed as two-factor linear models using the PROC MIXED procedure (SAS Institute Inc., Cary, NC, 1999). The two factors were storage time and treatment type. Different samples were analyzed on each evaluation day for all studies. Four replications (four bags) per treatment per evaluation period were examined. All the experiments were repeated three times, and each repetition included four technical replicates. Data presented are the results from one representative experiment. Assumptions of normality and variance homogeneity of the linear model were checked, and the variance grouping technique was used to correct for variance heterogeneity. When effects were statistically significant, means were compared using Sidak-adjusted P values to maintain an experiment-wise error of ≤0.05.
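The analysis just described was run in SAS PROC MIXED. As a rough open-source analogue (not the authors' code), a two-factor model with Sidak-adjusted pairwise comparisons might be sketched in Python as follows; the file name and column names are placeholders.

from itertools import combinations
import pandas as pd
import statsmodels.formula.api as smf
from statsmodels.stats.anova import anova_lm
from statsmodels.stats.multitest import multipletests
from scipy import stats

# One row per bag: treatment label, storage day, measured response (e.g. leakage %).
df = pd.read_csv("leakage.csv")

# Two-factor fixed-effects model with interaction (PROC MIXED can additionally
# model heterogeneous variances, which plain OLS does not).
model = smf.ols("leakage ~ C(treatment) * C(day)", data=df).fit()
print(anova_lm(model, typ=2))

# Pairwise treatment comparisons on a single day, Sidak-adjusted to keep the
# experiment-wise error rate at 0.05.
day21 = df[df["day"] == 21]
pairs, pvals = [], []
for a, b in combinations(sorted(day21["treatment"].unique()), 2):
    _, p = stats.ttest_ind(day21.loc[day21["treatment"] == a, "leakage"],
                           day21.loc[day21["treatment"] == b, "leakage"])
    pairs.append((a, b))
    pvals.append(p)
reject, p_adjusted, _, _ = multipletests(pvals, alpha=0.05, method="sidak")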
Results and Discussion

Effects of different preharvest calcium treatments on the postharvest quality of broccoli microgreens. From Day 0 to 4, oxygen partial pressures in all the samples decreased rapidly (Fig. 1A), nearly reaching equilibrium by Day 4. The water-only control exhibited the lowest O2 level from Day 4 to 21. All the calcium treatments had a slightly higher yet constant O2 (1-2.2 kPa) until the end of storage. In comparison, the CO2 partial pressure for all treatments increased during the first 4 d, and then declined to 3.7 kPa. No significant difference was observed among the calcium treatments and the water-only control (Fig. 1B). These results suggest that the preharvest calcium treatments had no significant effect on broccoli microgreens' respiration during cold storage. Other studies also show that under low-temperature storage, calcium treatment did not affect the respiration rate of lettuce, carrot, 'Vogue' cherry, and apple (Duque and Arrabaça, 1999).

Total AMB growth in all samples increased significantly (P < 0.001) over storage time (Fig. 2A). However, on Day 21, the numbers of total AMB in the calcium treatments were lower than in the water-only control. In particular, 10 mmol·L-1 CaCl2-treated samples had significantly (P < 0.05) lower total AMB growth (8.9 Log cfu/g) than those sprayed with H2O (9.9 Log cfu/g) at Day 21. Nonetheless, no significant (P > 0.05) difference was found among all other calcium treatments. Several calcium treatments also displayed a significant inhibitory effect on Y&M growth (Fig. 2B). Among these treatments, 10 mmol·L-1 CaCl2 was the most effective. On Day 21, 10 mmol·L-1 CaCl2-treated samples averaged 1.2 Log cfu/g (P < 0.05) fewer Y&M colonies than the water-only treatment. The 1 and 20 mmol·L-1 Ca lactate treatments also had an obvious inhibitory effect at Day 21.

We further measured tissue electrolyte leakage because it is closely related to the tissue integrity and shelf life of fresh-cut produce. All the calcium-treated samples had lower tissue electrolyte leakage (P < 0.05) than the water-only control samples during the entire storage period (Fig. 3A). Samples treated with higher calcium concentrations had significantly lower tissue electrolyte leakage than those treated with the low concentration (1 mmol·L-1). At Day 21, the highest tissue electrolyte leakage (18.75%) occurred in water-only treated samples, whereas the 10 mmol·L-1 CaCl2 treatment had the lowest tissue electrolyte leakage value of 3.56%. Tissue electrolyte leakage values for 20 mmol·L-1 Ca lactate, 10 mmol·L-1 Ca lactate, 20 mmol·L-1 Ca AA, and 10 mmol·L-1 Ca AA were 3.97%, 4.00%, 4.02%, and 10.4%, respectively. Calcium application has been shown to increase membrane integrity and stability and decrease electrolyte leakage (Poovaiah, 1986). Calcium may also be involved in regulating membrane stability and the senescence of plant cells (Rubinstein, 2000). Less disruption of plasma membranes leads to lower tissue electrolyte leakage. Therefore, preharvest calcium spray might increase broccoli membrane integrity and reduce tissue electrolyte leakage.

Overall quality is an important factor influencing the marketability of food products. All calcium-treated samples retained superior quality over water-treated samples from Day 7 onwards (Fig. 3B). From Day 14 onwards, yellowing leaves and moisture accumulation were observed on the broccoli microgreens, which resulted in reduced quality scores. On Day 21, the overall quality scores of all calcium-treated samples declined to 5.2-6.3. The 10 mmol·L-1 CaCl2-treated samples had the highest overall visual quality, especially from Day 14 to 21. On Day 21, 20 mmol·L-1 Ca lactate also maintained a better overall quality score (5.7), while water-treated seedlings had the lowest overall quality (score of 2.9). The results indicated that preharvest treatment with all three forms of calcium had a positive effect on postharvest quality and prolonged the shelf life of microgreens. The overall quality results agreed well with those from tissue electrolyte leakage, suggesting that the loss of visual appeal was related to senescence.

Effect of postharvest calcium lactate wash/dip on the quality of broccoli microgreens. Since preharvest treatment with Ca AA was the least effective, we selected only Ca lactate for the postharvest treatments. After dipping, spinning, and drying, the shelf life of all samples was reduced from 21 d to 14 d. This likely resulted from physical damage to the tissue during spinning and drying, because the microgreens were very tender. During the first 3 d of storage after treatment, the headspace O2 concentration of all the samples dropped rapidly to under 1 kPa, and then slowly decreased to near 0 by the end of storage (Fig. 4A). In contrast, the CO2 level increased rapidly during the first 3 d, followed by a rapid decrease, and then remained at a constant level (4 kPa) (Fig. 4B). No significant differences were observed between the dip in Ca lactate and the dip in chlorinated water only. These results suggest that the postharvest calcium treatment had no significant effect on O2 depletion and CO2 evolution rates for broccoli microgreens. Significant (P < 0.05) differences were detected among dip treatments with different Ca lactate concentrations for tissue electrolyte leakage and overall quality (Fig. 5A and B). Broccoli microgreens treated with 50 mmol·L-1 Ca lactate maintained the lowest tissue electrolyte leakage (4.2%) throughout the 14-day storage period and had the highest overall quality score (6.0) on Day 14. Water-treated samples had significantly (P < 0.01) higher tissue electrolyte leakage and lower overall quality than all calcium-treated ones. These results suggest that 25 mmol·L-1 is insufficient and 100 mmol·L-1 is likely toxic for microgreens. The 50 mmol·L-1 Ca lactate dip had a positive effect on extending shelf life and keeping tissue electrolyte leakage low.
However, in general, the spinning and drying after the dip dramatically accelerated tissue senescence and quality deterioration.

Effect of combined pre-/postharvest calcium treatment on the quality of broccoli microgreens. We further tested the effect of combining preharvest 10 mmol·L-1 CaCl2 and postharvest 50 mmol·L-1 Ca lactate treatments on the quality of microgreens. During the entire 14-day storage period, no significant differences were found in the changes in O2 and CO2 composition in packages between any of the treatments (Fig. 6A and B). These results were in agreement with those observed for the separate pre- and postharvest calcium treatments (Figs. 1 and 4). AMB populations for all the treatments increased significantly (P < 0.001) during storage (Fig. 7A). Overall, there was no significant difference between any of the different H2O or calcium dip treatments. In comparison, samples with preharvest CaCl2 spray but no postharvest treatment had significantly (P < 0.01) lower bacterial populations than those receiving the other pre-/postharvest treatments. These results suggest that the increased bacterial populations resulted from tissue damage and/or contamination during the postharvest dip. Similar to the AMB populations, the Y&M populations showed an increasing trend during storage (Fig. 7B). From Day 0 to 7, Y&M populations maintained stable levels (5.2-6.0 Log cfu/g). However, from Day 7 to 14, there was a rapid increase in Y&M populations for all the treatments. The preharvest CaCl2 treatment without postharvest dip had significantly (P < 0.01) lower Y&M populations than all other combinations of pre-/postharvest calcium treatments. Lee et al. reported a similar result for 'Tah tasai' Chinese cabbage microgreens treated with chlorinated water. To reduce tissue damage, a slow spin speed was used to dry the microgreens. However, excess moisture remaining on washed leaf surfaces might promote microbial growth in those packages. There was no significant difference in tissue electrolyte leakage among the treatments, except that from Day 11 onwards tissue electrolyte leakage for the H2O/Cl treatment was significantly higher (P < 0.001) than that of the other treatments and remained higher (P < 0.01) through the end of storage (Fig. 8A). Overall quality for all the pre- and postharvest combined treatments declined significantly (P < 0.05) during storage (Fig. 8B). However, preharvest CaCl2 without postharvest dip maintained the highest overall quality score and the lowest tissue electrolyte leakage during the entire 14 d of storage. In contrast, H2O/Cl had the lowest quality score. These results suggest that preharvest calcium spray is more efficient than postharvest dip. A few factors influenced these results. First, calcium solution was sprayed every day during the preharvest treatment, whereas the postharvest calcium treatment consisted of only a 30-s dip. Second, the postharvest dip was applied just before packaging, and extra moisture on the surface could not be removed easily without causing dehydration and/or additional damage to the already-cut tissues. Third, the spinning and drying processes caused tissue injury, which might accelerate quality deterioration and encourage microbial growth. On buckwheat microgreens, we also found that unwashed samples maintained better visual quality and lower tissue electrolyte leakage than washed samples. Therefore, the spinning and drying steps were the major factors reducing microgreen postharvest quality.
To enable processors to provide safe ready-to-eat products, improved wash and drying technologies for microgreens need to be developed.

Conclusions

In this study, the effects of various pre-/postharvest calcium treatments on the quality and shelf life of broccoli microgreens were evaluated. Results indicated that preharvest spray with 10 mmol·L-1 CaCl2 without postharvest wash was the most efficient treatment for broccoli microgreens as compared with Ca lactate and Ca AA chelate. Preharvest spray with 10 mmol·L-1 CaCl2 without postharvest wash significantly reduced tissue electrolyte leakage and microbial growth, and delayed the decline in overall quality of the microgreens during storage. Postharvest dip with 50 mmol·L-1 Ca lactate, or the combination of a preharvest CaCl2 spray with a postharvest Ca lactate dip, exhibited some beneficial effects, reducing tissue electrolyte leakage, AMB, and Y&M as compared with no pre- or postharvest calcium treatment. However, the current dip/wash and drying procedures significantly reduce the quality of the broccoli microgreens, since broccoli microgreens are very delicate. Improved wash/drying technologies are necessary to provide ready-to-eat microgreens with better quality and longer shelf life. Alternatively, the postharvest wash step can be avoided when the microgreens are grown under controlled settings that minimize microbial contamination. Microgreens crops are usually grown indoors; thus, the materials used for propagation can be easily decontaminated to maintain compliance with food safety regulations.
""" Program: ArduinoMegaI2C.py Revised On: 11/21/2019 """ ### Library Imports import pigpio ### ### Class Definition class ArduinoMegaI2C: def __init__(self, addr, i2c_channel=1): self.ch = i2c_channel self.addr = addr (self.pi, self.handle) = self.__open_i2c() def __open_i2c(self): pi = pigpio.pi() try: handleIMU = pi.i2c_open(self.ch, self.addr) return (pi, handleIMU) except: print('I2C open failed.') return (-1, -1) def __write_bytes(self, data): self.pi.i2c_write_device(self.handle, data) def __read_bytes(self, numBytes): (count, dataByteArray) = self.pi.i2c_read_device(self.handle, numBytes) if(count > 1): return (count, list(dataByteArray)) else: return (count, list(dataByteArray)) def set_velocities(self, data): ## data = [vel1, vel2, vel3, vel4] self.__write_bytes(data) def get_velocities(self): (count, data) = self.__read_bytes(4) print(count) if(count == 4): return data else: return [-999, -999, -999, -999] def close(self): self.pi.i2c_close(self.handle) self.pi.stop() ###
// Code generated by protoc-gen-go. DO NOT EDIT. // source: yandex/cloud/mdb/mongodb/v1/user.proto package mongodb // import "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1" import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" import _ "github.com/yandex-cloud/go-genproto/yandex/cloud/validation" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // A MongoDB User resource. For more information, see the // [Developer's Guide](/docs/mdb/concepts). type User struct { // Name of the MongoDB user. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // ID of the MongoDB cluster the user belongs to. ClusterId string `protobuf:"bytes,2,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` // Set of permissions granted to the user. Permissions []*Permission `protobuf:"bytes,3,rep,name=permissions,proto3" json:"permissions,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *User) Reset() { *m = User{} } func (m *User) String() string { return proto.CompactTextString(m) } func (*User) ProtoMessage() {} func (*User) Descriptor() ([]byte, []int) { return fileDescriptor_user_5f201dd2b4d474a9, []int{0} } func (m *User) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_User.Unmarshal(m, b) } func (m *User) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_User.Marshal(b, m, deterministic) } func (dst *User) XXX_Merge(src proto.Message) { xxx_messageInfo_User.Merge(dst, src) } func (m *User) XXX_Size() int { return xxx_messageInfo_User.Size(m) } func (m *User) XXX_DiscardUnknown() { xxx_messageInfo_User.DiscardUnknown(m) } var xxx_messageInfo_User proto.InternalMessageInfo func (m *User) GetName() string { if m != nil { return m.Name } return "" } func (m *User) GetClusterId() string { if m != nil { return m.ClusterId } return "" } func (m *User) GetPermissions() []*Permission { if m != nil { return m.Permissions } return nil } type Permission struct { // Name of the database that the permission grants access to. DatabaseName string `protobuf:"bytes,1,opt,name=database_name,json=databaseName,proto3" json:"database_name,omitempty"` // MongoDB roles for the [database_name] database that the permission grants. 
Roles []string `protobuf:"bytes,2,rep,name=roles,proto3" json:"roles,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *Permission) Reset() { *m = Permission{} } func (m *Permission) String() string { return proto.CompactTextString(m) } func (*Permission) ProtoMessage() {} func (*Permission) Descriptor() ([]byte, []int) { return fileDescriptor_user_5f201dd2b4d474a9, []int{1} } func (m *Permission) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Permission.Unmarshal(m, b) } func (m *Permission) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Permission.Marshal(b, m, deterministic) } func (dst *Permission) XXX_Merge(src proto.Message) { xxx_messageInfo_Permission.Merge(dst, src) } func (m *Permission) XXX_Size() int { return xxx_messageInfo_Permission.Size(m) } func (m *Permission) XXX_DiscardUnknown() { xxx_messageInfo_Permission.DiscardUnknown(m) } var xxx_messageInfo_Permission proto.InternalMessageInfo func (m *Permission) GetDatabaseName() string { if m != nil { return m.DatabaseName } return "" } func (m *Permission) GetRoles() []string { if m != nil { return m.Roles } return nil } type UserSpec struct { // Name of the MongoDB user. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Password of the MongoDB user. Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` // Set of permissions to grant to the user. Permissions []*Permission `protobuf:"bytes,3,rep,name=permissions,proto3" json:"permissions,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *UserSpec) Reset() { *m = UserSpec{} } func (m *UserSpec) String() string { return proto.CompactTextString(m) } func (*UserSpec) ProtoMessage() {} func (*UserSpec) Descriptor() ([]byte, []int) { return fileDescriptor_user_5f201dd2b4d474a9, []int{2} } func (m *UserSpec) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_UserSpec.Unmarshal(m, b) } func (m *UserSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_UserSpec.Marshal(b, m, deterministic) } func (dst *UserSpec) XXX_Merge(src proto.Message) { xxx_messageInfo_UserSpec.Merge(dst, src) } func (m *UserSpec) XXX_Size() int { return xxx_messageInfo_UserSpec.Size(m) } func (m *UserSpec) XXX_DiscardUnknown() { xxx_messageInfo_UserSpec.DiscardUnknown(m) } var xxx_messageInfo_UserSpec proto.InternalMessageInfo func (m *UserSpec) GetName() string { if m != nil { return m.Name } return "" } func (m *UserSpec) GetPassword() string { if m != nil { return m.Password } return "" } func (m *UserSpec) GetPermissions() []*Permission { if m != nil { return m.Permissions } return nil } func init() { proto.RegisterType((*User)(nil), "yandex.cloud.mdb.mongodb.v1.User") proto.RegisterType((*Permission)(nil), "yandex.cloud.mdb.mongodb.v1.Permission") proto.RegisterType((*UserSpec)(nil), "yandex.cloud.mdb.mongodb.v1.UserSpec") } func init() { proto.RegisterFile("yandex/cloud/mdb/mongodb/v1/user.proto", fileDescriptor_user_5f201dd2b4d474a9) } var fileDescriptor_user_5f201dd2b4d474a9 = []byte{ // 344 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x92, 0xcd, 0x4a, 0xfb, 0x40, 0x14, 0xc5, 0x49, 0x3f, 0xfe, 0xb4, 0xd3, 0x7f, 0x37, 0x83, 0x8b, 0xa0, 0x14, 0x4a, 0x05, 0xad, 0xc8, 0xcc, 0x38, 0x2d, 0x48, 0xc5, 0x95, 0xdd, 0x94, 0x6e, 0x44, 0x22, 0x6e, 0x2a, 0x52, 
0x26, 0x99, 0x21, 0x06, 0x92, 0x99, 0x30, 0x93, 0xd4, 0x8f, 0xb5, 0xab, 0xbe, 0x8e, 0x0f, 0xd1, 0x3e, 0x8a, 0x6b, 0x9f, 0x40, 0x9a, 0xc4, 0x1a, 0x37, 0x5d, 0xb9, 0xbb, 0x73, 0xef, 0x39, 0x97, 0xf3, 0xe3, 0x0e, 0x38, 0x7a, 0x61, 0x92, 0x8b, 0x67, 0xe2, 0x85, 0x2a, 0xe5, 0x24, 0xe2, 0x2e, 0x89, 0x94, 0xf4, 0x15, 0x77, 0xc9, 0x82, 0x92, 0xd4, 0x08, 0x8d, 0x63, 0xad, 0x12, 0x05, 0x0f, 0x72, 0x1d, 0xce, 0x74, 0x38, 0xe2, 0x2e, 0x2e, 0x74, 0x78, 0x41, 0xf7, 0x3b, 0xbf, 0x96, 0x2c, 0x58, 0x18, 0x70, 0x96, 0x04, 0x4a, 0xe6, 0xde, 0xde, 0x9b, 0x05, 0x6a, 0x77, 0x46, 0x68, 0x08, 0x41, 0x4d, 0xb2, 0x48, 0xd8, 0x56, 0xd7, 0xea, 0x37, 0x9d, 0xac, 0x86, 0x1d, 0x00, 0xbc, 0x30, 0x35, 0x89, 0xd0, 0xf3, 0x80, 0xdb, 0x95, 0x6c, 0xd2, 0x2c, 0x3a, 0x53, 0x0e, 0xa7, 0xa0, 0x15, 0x0b, 0x1d, 0x05, 0xc6, 0x04, 0x4a, 0x1a, 0xbb, 0xda, 0xad, 0xf6, 0x5b, 0x83, 0x63, 0xbc, 0x23, 0x0d, 0xbe, 0xd9, 0xea, 0x9d, 0xb2, 0xb7, 0x37, 0x01, 0xe0, 0x67, 0x04, 0x0f, 0x41, 0x9b, 0xb3, 0x84, 0xb9, 0xcc, 0x88, 0x79, 0x29, 0xd4, 0xff, 0xef, 0xe6, 0xf5, 0x26, 0xdc, 0x1e, 0xa8, 0x6b, 0x15, 0x0a, 0x63, 0x57, 0xba, 0xd5, 0x7e, 0xd3, 0xc9, 0x1f, 0xbd, 0x77, 0x0b, 0x34, 0x36, 0x3c, 0xb7, 0xb1, 0xf0, 0x20, 0x2d, 0x33, 0x8d, 0x3b, 0x1f, 0x2b, 0x6a, 0x7d, 0xae, 0x68, 0xfb, 0x9e, 0xa1, 0xd7, 0x2b, 0x34, 0x3b, 0x43, 0x17, 0xf3, 0x87, 0xd3, 0xe5, 0x9a, 0xd6, 0x28, 0x3a, 0x1f, 0x16, 0xc8, 0x27, 0xa0, 0x11, 0x33, 0x63, 0x9e, 0x94, 0x2e, 0x80, 0xc7, 0xed, 0x8d, 0x6d, 0xb9, 0xa6, 0xf5, 0x11, 0xa2, 0x83, 0x91, 0xb3, 0x1d, 0xff, 0x21, 0xfe, 0x78, 0x3a, 0x9b, 0xf8, 0x41, 0xf2, 0x98, 0xba, 0xd8, 0x53, 0x11, 0xc9, 0x37, 0xa0, 0xfc, 0x62, 0xbe, 0x42, 0xbe, 0x90, 0xd9, 0xb1, 0xc8, 0x8e, 0xff, 0x70, 0x59, 0x94, 0xee, 0xbf, 0x4c, 0x3a, 0xfc, 0x0a, 0x00, 0x00, 0xff, 0xff, 0xe2, 0xf1, 0xde, 0x7c, 0x3d, 0x02, 0x00, 0x00, }
The Experimental Study on the Performance of Anchor Material of Mooring System for Deepwater Drilling Based on a successful deepwater drilling case at Equatorial Guinea in the Gulf of Guinea, West Africa, an experimental study was conducted that focused on the chemical composition and microstructure of the anchor material of the mooring system used in the drilling process. The percentage content of each chemical element in the anchor material and the material's microstructure were determined. The experimental results showed that, to a certain extent, the anchor material met the performance requirements for deepwater drilling at Equatorial Guinea, owing to the reasonable proportion of each chemical element and the ultrafine structure. The experimental analysis further clarified the performance characteristics and microstructural features that an anchor material should possess to be suitable for deepwater drilling.
/**
 * Create new fifo data struct
 *
 * @return pointer to new FIFO or NULL if system out of memory
 *****************************************************************************/
FIFO *
fifo_create(void)
{
    return (FIFO *) g_malloc(sizeof(FIFO), 1);
}
Here’s the thing about MTV’s True Life series that may surprise you: it’s actually good. MTV isn’t really the place you automatically turn to for hard-hitting documentaries, but True Life has consistently faced some of the most hot-button issues and interesting stories, all without sensationalizing. Sure, there was True Life: I’m a Staten Island Girl, but episodes like True Life: I’m Going to Fat Camp and True Life: I’m Coming Out give these amazing, tender looks at people who so often get marginalized or blown up into caricatures. MTV is coming back with four new True Life episodes this weekend, airing in a marathon this Saturday afternoon from 2 p.m. to 6 p.m. The titles are as follows: True Life: I’m Looking for my Father, True Life: I’m Going To Rehab, True Life: I Live In the Projects and True Life: I’m In An Arranged Marriage. All of these subjects sound potentially controversial or, yes, crazy, but I feel like we can count on MTV to take a pretty reasonable approach to all of them. There’s a video clip from I Live in the Projects available here. If you’ve never seen True Life before, it gives you a good idea of the kind of stories they usually tell: young people who seem pretty typical, but can be surprisingly insightful and fascinating when a little attention is paid to their stories. I Live in the Projects sounds like a particularly important episode, given the bad reputation given to the thousands of people who live in government subsidized housing. Yeah, going to fat camp might be tough, but growing up in a neighborhood full of crime? That’s a story worth telling. You can catch True Life on MTV pretty much all the time, but for the first look at these episodes, tune in tomorrow afternoon. You know it’s cold outside and you don’t want to do anything else.
/**
 * Takes the maximum number of players for a game as argument.
 * @param args
 *            the maximum number of players for a game
 * @throws RemoteException
 * @throws AlreadyBoundException
 */
public static void main(String[] args) throws RemoteException, AlreadyBoundException {
    int maxPlayers = DEFAULT_PLAYERS_NUMBER;
    if (args.length > 0) {
        try {
            maxPlayers = Integer.parseInt(args[0]);
            System.out.println(String.format("Max players: %d", maxPlayers));
        } catch (NumberFormatException e) {
            LOGGER.warning("Argument " + args[0] + " must be an integer.");
            System.exit(1);
        }
    } else {
        System.out.println(String.format("Max players: %d (default value)", maxPlayers));
    }
    Server server = new Server(maxPlayers);
    LOGGER.info("STARTING RMI SERVER");
    server.startRMI();
    LOGGER.info("STARTING SOCKET SERVER");
    server.startSocket();
}
import 'tslib' import Skeleton from './skeleton' console.log(Skeleton.say('ts-skeleton'))
// RemoveUnusedReferences sets q.Resources to a new map that contains only the
// datasets referenced in the provided select statement. It returns an error if
// it cannot find a named dataset in the provided q.Resources map.
func RemoveUnusedReferences(stmt Statement, q *dataset.Transform) error {
	sel, ok := stmt.(*Select)
	if !ok {
		return NotYetImplemented("statements other than select")
	}

	resources := map[string]*dataset.Dataset{}
	for _, name := range sel.From.TableNames() {
		resource := q.Resources[name]
		if resource == nil {
			return ErrUnrecognizedReference(name)
		}
		resources[name] = resource
	}
	q.Resources = resources

	return nil
}
Jamby Madrigal

Early life and career
Jamby Madrigal was born in Manila, Philippines on April 26, 1958 to Antonio Madrigal (1918–2006) and Amanda Teopaco Abad Santos. She is the granddaughter of the former Supreme Court Chief Justice José Abad Santos of San Fernando, Pampanga. Her grand uncle – pre-Commonwealth Assemblyman Pedro Abad Santos – founded the Socialist Party of the Philippines. Her paternal grandfather was billionaire businessman and Senator Vicente Lopez Madrigal of Ligao, Albay. Her aunt, Pacita Madrigal-Warns, was a Senator during the Quezon and Magsaysay administrations and was the first administrator of the Social Welfare Administration, the predecessor of today's Department of Social Welfare and Development (DSWD). Her late uncle and aunt were former Acting Minister of Foreign Affairs Manuel Collantes and heiress Consuelo "Chito" Madrigal. In addition to her work for her numerous foundations, in 2003 she became the spokesperson for the youth-based Kontra Pulitika Movement (KPM), which champions education, protection of the environment and economic empowerment through livelihood programs. She acted in a film on the life of Luis Taruc, the Kapampangan founder of the Hukbo ng Bayan Laban sa mga Hapon (Hukbalahap). Ka Luis was the protégé of her grandfather and great uncle, the Abad Santos brothers. In the film, 'Anak Pawis', she portrays her grandmother, Amanda Teopaco. Madrigal obtained a bachelor's degree in economics from Santa Clara University and a master's in development economics from Yale University, both in the United States.

Political career
In October 1999, President Joseph Estrada created the Office of the Presidential Adviser for Children's Affairs and appointed Madrigal to head it. She organized the First National Summit for Children in Malacañang Palace on October 26, 2001, where government agencies, local government units, industry leaders and non-government organizations signed a declaration of commitment upholding Child 21 – a framework on which to anchor all action plans and strategies relating to children. She traveled nationwide coordinating the agency's feeding and educational programs. She became concerned over the fate of teachers and school children who were taken hostage by the Abu Sayyaf terrorists in 2001. After consultation with the victims themselves, she sought the help of clinical psychologists from Ateneo de Manila University and Ateneo de Zamboanga University. Madrigal was elected to the Senate in 2004 and was chairman of four Senate committees: Environment; Youth, Women and Family Relations; Peace, Unification and Reconciliation; and Cultural Communities. In an opposition protest, Madrigal was one of the political leaders who were subjected to the Manila Police water cannons while attending a religious procession on October 14, 2005. Madrigal declared her candidacy for president in the 2010 presidential elections. During the race, Madrigal launched many allegations of corruption against fellow Senator Manny Villar, who was also running. Over the course of the campaign, Madrigal brought out "700 pages of evidence" to prove that Villar had "realigned C-5 (a main Metro Manila thoroughfare) to pass by [Villar's] real estate developments so that [Villar] would be paid for right of way."

Personal life
She married Frenchman Eric Jean Claude Dudoignon Valade on December 7, 2002 at the Calatagan, Batangas farm estate of her aunt, Doña Consuelo "Chito" Madrigal-Collantes.
In May 2008 Jamby Madrigal formally filed court pleadings to contest the validity of the last will and testament of her late aunt Chito Madrigal-Collantes.
1. Field of the Invention
The present invention relates to a Group III nitride compound semiconductor light-emitting element and particularly to a Group III nitride compound semiconductor light-emitting element in which the wavelength of light emitted from a light-emitting layer is converted so that light in a color different from the color of the light emitted from the light-emitting layer can be emitted. The light-emitting element according to the invention can be used, for example, for a polychromatic light source or a white light source.
2. Description of the Related Art
There is known a light-emitting element or a light-emitting device (LED) of the type which uses a Group III nitride compound semiconductor light-emitting element and a fluorescent substance in combination so that the fluorescent substance can convert the wavelength of a part of light emitted from a light-emitting layer contained in the light-emitting element to thereby emit light in a color different from the original color of light emitted from the light-emitting element. For example, an LED of this type has already been put into practical use. In the LED of this type, a Group III nitride compound semiconductor light-emitting element for emitting blue light, and a fluorescent substance for emitting yellow light when excited by the blue light, are used in combination so that white light can be emitted. The LED of this type is generally formed so that the light-release side of the Group III nitride compound semiconductor light-emitting element is coated with a light-transmissive resin containing a fluorescent substance. To produce the related-art LED, it was necessary to first produce a light-emitting element, and then coat the light-emitting element with a fluorescent substance-containing resin. That is, the production process was troublesome and complex, and the production cost was high. Moreover, because the light-emitting element was coated with the fluorescent substance-containing resin generally by means of application, dipping or the like, it was not easy to control the thickness of the resin accurately. For this reason, the amount of the fluorescent substance to be added varied easily, so that it was not easy to produce the LED with uniform quality (emission color, emission intensity, and so on). Moreover, because the fluorescent substance was disposed in a position far from the light-emitting portion (light-emitting layer) in the light-emitting element, and because a part of the light emitted from the light-emitting element was consumed wastefully as a loss at the interface of the fluorescent substance-containing resin, light could not always be emitted with high efficiency from the fluorescent substance. For this reason, the light-emitting efficiency of the LED was low.
The man saw that his walnut cracker looked similar to a grenade pictured on a leaflet warning about forbidden explosives. BEIJING - A hand grenade was used to crack walnuts for 25 years by a villager in China who had no idea what he was using until he saw a photo of a grenade on a leaflet handed out by local police. The man from Ankang, in China's Shaanxi province, saw that his walnut cracker looked similar to a grenade pictured on a leaflet warning about forbidden explosives, according to Huanqiu.com. According to villagers, the man used the grenade specifically to crack open the nuts. According to reports, the man had been using the grenade without realising what it was for some 25 years, said Britain's Mail Online on Dec 21. It had been given to him by a friend. Images taken on Dec 5 (2016) show the device. Police found that the grenade had not detonated and they were unsure if the device had explosives inside it, the Mail said. It was only when the man was handed a police leaflet that he realised that he possessed a forbidden explosive. Grenades usually explode when the safety lever is released and the object is thrown away. As it rotates, it detonates the primer and ignites the fuse before burning down to the detonator, said the Mail. People have been commenting on social media site Weibo about the man's lucky escape. One user wrote: "Why would a friend gift him a bomb?" Another said: "It's more stable than a Samsung phone."
Behavioral Health Services Following Release From Jail: A Widening Racial Disparity Gap. Black and Native people continue to be grossly overrepresented in jails, at 592 and 401 per 100,000 people, respectively, compared with White people, who are incarcerated at a rate of 187 per 100,000.1
HEALTH AND MENTAL HEALTH IN JAILS
Jails concentrate people with highly infectious and chronic diseases and untreated mental illness and substance use problems, which contributes to the health inequities in the communities to which they return.2 We see this today more than ever, with jails being vectors for spreading COVID-19.3 People spend an average of 25 days in jail;1 these short stays can disrupt established mental health care and bring infectious disease home to people's families and neighbors. Hedden et al. suggest several solutions, including the application of critical race theory to policy and practice in the criminal-legal and behavioral health fields, authentic leadership that mirrors affected populations, and culturally responsive interventions to address systemic and individual barriers. Policy reform and the development and testing of interventions that work for people of color with SMI who enter or are at risk for entering the criminal-legal system are essential in closing the gap between need and service utilization in this critical postincarceration period.
Stability studies of high-stable water-in-oil model emulsions ABSTRACT Different compositions and emulsification protocols were used to prepare stable water-in-oil (w/o) emulsions. Water, mineral oil, and a mixture of Span 80 and Tween 80 surfactants were combined to form emulsions that can be used as references for electrolyte-free systems. Here, we propose such emulsions and evaluate several of their properties. Electrical conductivity measurements indicated that conductivity increases linearly with increasing surfactant content. The emulsions' flow curves and viscoelastic behavior were delineated by rheological measurements. Stability studies by centrifugal testing showed that the smaller the surfactant content, the lower the stability, for all stirring speeds used. Furthermore, the higher the mixing rate applied to make the emulsion, the higher the stability, regardless of the amount of surfactant. Electric-field stability analysis showed, for all systems, that the critical electric field (CEF) values depended on both the surfactant amount and the emulsion elastic modulus.
#include "Robot.h" void Robot::TestPeriodic() { AutonomousTest(); } void Robot::TestInit() { SmartDashboard::PutNumber("Elevator Setpoint", 0); SmartDashboard::PutBoolean("Reset Elevator Encoder?", false); SmartDashboard::PutNumber("Elevator P", 0.03); SmartDashboard::PutNumber("Elevator Constant", 0.5); SmartDashboard::PutNumber("Elevator Limit", 0.8); StopCurrentProcesses(); if(SmartDashboard::GetBoolean("Test Angle", 0)) { TurnAngleTest(0); } else if(SmartDashboard::GetBoolean("Test Maintain", 0)) { MaintainHeadingTest(); } else if(SmartDashboard::GetBoolean("Test Distance", 0)) { DriveDistanceTest(0); } } void Robot::TeleopTest() { if(SmartDashboard::GetBoolean("Drive", 0)) DriveTest(); if(SmartDashboard::GetBoolean("Full Elevator", 0)) { FullElevatorTest(); SmartDashboard::PutBoolean("PID Elevator", 0); SmartDashboard::PutBoolean("Manual Elevator", 0); SmartDashboard::PutNumber("Elevator Height", ElevatorPID.PIDGet()); } else if(SmartDashboard::GetBoolean("Full Elevator", 0)) { ManualElevatorTest(); SmartDashboard::PutBoolean("PID Elevator", 0); SmartDashboard::PutBoolean("Full Elevator", 0); SmartDashboard::PutNumber("Elevator Height", ElevatorPID.PIDGet()); } else if(SmartDashboard::GetBoolean("PID Elevator", 0)) { PIDElevatorTest(); SmartDashboard::PutBoolean("PID Elevator", 0); SmartDashboard::PutBoolean("Manual Elevator", 0); SmartDashboard::PutNumber("Elevator Height", ElevatorPID.PIDGet()); } if(SmartDashboard::GetBoolean("Manual Elevator", 0)) ManualElevatorTest(); if(SmartDashboard::GetBoolean("PID Elevator", 0)) PIDElevatorTest(); if(SmartDashboard::GetBoolean("Linkage", 0)) LinkageTest(); if(SmartDashboard::GetBoolean("Intake", 0)) IntakeTest(); } void Robot::AutonomousTest() { // SmartDashboard::PutNumber("Auto Pos Val", (int) AutoLocationChooser->GetSelected()); // SmartDashboard::PutNumber("Auto Obj Val", (int) AutoObjectiveChooser->GetSelected()); //Display Data SmartDashboard::PutNumber("Angle Sensor", AngleSensors.GetAngle()); SmartDashboard::PutNumber("Encoder R", DistancePID.PIDGet()); SmartDashboard::PutNumber("Elevator Height", ElevatorPID.PIDGet()); if(SmartDashboard::GetBoolean("Reset Elevator Encoder?", false)) { RightElevatorMotor.SetSelectedSensorPosition(0, consts::PID_LOOP_ID, consts::TALON_TIMEOUT_MS); SmartDashboard::PutBoolean("Reset Elevator Encoder?", false); } if(!SmartDashboard::GetBoolean("Go Forward, Turn Right", 0)) { //Reset Angle Button if(SmartDashboard::GetBoolean("Reset Angle", 0)) { AngleSensors.Reset(); SmartDashboard::PutBoolean("Reset Angle", 0); } //Reset Encoder Button if(SmartDashboard::GetBoolean("Reset Encoders", 0)) { ResetDriveEncoders(); SmartDashboard::PutBoolean("Reset Encoders", 0); } //Maintain Angle Test Buttons if(SmartDashboard::GetBoolean("Toggle Maintain Test", 0)) { //Toggle the two buttons SmartDashboard::PutBoolean("Enable Test Distance Output", SmartDashboard::GetBoolean("Enable Test Distance Output", 0) ^ 1); SmartDashboard::PutBoolean("Enable Maintain Controller", SmartDashboard::GetBoolean("Enable Maintain Controller", 0) ^ 1); SmartDashboard::PutBoolean("Toggle Maintain Test", 0); } if(SmartDashboard::GetBoolean("Enable Test Distance Output", 0)) { AnglePIDOut.SetTestDistOutput(SmartDashboard::GetNumber( "Test Maintain Output", 0)); } else { AnglePIDOut.SetTestDistOutput(0); } if(SmartDashboard::GetBoolean("Enable Maintain Controller", 0)) { MaintainAngleController.Enable(); } else { if(MaintainAngleController.IsEnabled()) MaintainAngleController.Disable(); } } if(SmartDashboard::GetBoolean("Test Distance", 0)) { 
if(SmartDashboard::GetBoolean("Toggle Distance Test", 0)) { SmartDashboard::PutBoolean("Enable Maintain Controller", 1); MaintainAngleController.Enable(); DistanceController.Enable(); } else { SmartDashboard::PutBoolean("Enable Maintain Controller", 0); DistanceController.Disable(); MaintainAngleController.Disable(); } } else if(SmartDashboard::GetBoolean("Test Auto Elevator", 0)) { AutoElevatorTest(); } else { // DriveDistance(148); // TurnAngle(90); // SmartDashboard::PutBoolean("Go Forward, Turn Right", 0); } SmartDashboard::PutBoolean("DistancePID OnTarget", DistanceController.OnTarget()); SmartDashboard::PutBoolean("MaintainAnglePID OnTarget", MaintainAngleController.OnTarget()); SmartDashboard::PutBoolean("AnglePID OnTarget", AngleController.OnTarget()); } void Robot::MaintainHeadingTest() { //Disable other controllers AngleController.Disable(); DistanceController.Disable(); //Zeroing the angle sensor AngleSensors.Reset(); //Enable test dist output AnglePIDOut.SetTestDistOutput(0.35); //Remove the pointers since only one PID is being used DistancePID.SetAnglePID(nullptr); AnglePIDOut.SetDistancePID(nullptr); //Configure the PID controller to make sure the robot drives straight with the NavX MaintainAngleController.Reset(); MaintainAngleController.SetSetpoint(0); MaintainAngleController.Enable(); } void Robot::DriveDistanceTest(double distance) { //Disable other controllers AngleController.Disable(); //Zeroing the angle sensor and encoders ResetDriveEncoders(); AngleSensors.Reset(); //Disable test dist output for angle AnglePIDOut.SetTestDistOutput(0); //Make sure the PID objects know about each other to avoid conflicts DistancePID.SetAnglePID(&AnglePIDOut); AnglePIDOut.SetDistancePID(&DistancePID); //Configure the PID controller to make sure the robot drives straight with the NavX MaintainAngleController.Reset(); MaintainAngleController.SetSetpoint(0); //Configure the robot to drive a given distance DistanceController.Reset(); DistanceController.SetSetpoint(distance); MaintainAngleController.Enable(); DistanceController.Enable(); } void Robot::TurnAngleTest(double angle) { //Disable other controllers DistanceController.Disable(); MaintainAngleController.Disable(); //Zeroing the angle sensor AngleSensors.Reset(); //Disable test dist output for angle AnglePIDOut.SetTestDistOutput(0); //Remove the pointers since only one PID is being used DistancePID.SetAnglePID(nullptr); AnglePIDOut.SetDistancePID(nullptr); AngleController.Reset(); AngleController.SetSetpoint(angle); AngleController.Enable(); SmartDashboard::PutNumber("Target Angle", angle); } void Robot::RunMotorsTestFor(int numberOfSeconds) { DriveTrain.ArcadeDrive(1, 0); Wait(numberOfSeconds / 2); DriveTrain.ArcadeDrive(-1, 0); Wait(numberOfSeconds / 2); DriveTrain.ArcadeDrive(0, 0); } void Robot::DriveTest() { double forwardSpeed = 0; double turnSpeed = 0; // If they press A, use single stick arcade with the left joystick if(DriveController.GetAButton()) { forwardSpeed = DriveController.GetY(GenericHID::JoystickHand::kLeftHand); turnSpeed = DriveController.GetX(GenericHID::JoystickHand::kLeftHand); } // If they press the left bumper, use the left joystick for forward and // backward motion and the right joystick for turning else if(DriveController.GetBumper(GenericHID::JoystickHand::kLeftHand)) { forwardSpeed = DriveController.GetY(GenericHID::JoystickHand::kLeftHand); turnSpeed = DriveController.GetX(GenericHID::JoystickHand::kRightHand); } // If they press the right bumper, use the right joystick for forward and // backward 
motion and the left joystick for turning else if(DriveController.GetBumper(GenericHID::JoystickHand::kRightHand)) { forwardSpeed = DriveController.GetY(GenericHID::JoystickHand::kRightHand); turnSpeed = DriveController.GetX(GenericHID::JoystickHand::kLeftHand); } // Negative is used to make forward positive and backwards negative // because the y-axes of the XboxController are natively inverted DriveTrain.ArcadeDrive(-forwardSpeed, turnSpeed); } void Robot::ManualElevatorTest() { // Use the right trigger to manually raise the elevator and // the left trigger to lower the elevator double raiseElevatorOutput = applyDeadband(OperatorController.GetTriggerAxis( GenericHID::JoystickHand::kRightHand)); double lowerElevatorOutput = applyDeadband(OperatorController.GetTriggerAxis( GenericHID::JoystickHand::kLeftHand)); SmartDashboard::PutNumber("RaiseElev", raiseElevatorOutput); SmartDashboard::PutNumber("LowerElev", lowerElevatorOutput); if(raiseElevatorOutput != 0.0 || lowerElevatorOutput != 0.0) { ElevatorPIDController.Disable(); double output = CapElevatorOutput(dabs(raiseElevatorOutput) - dabs(lowerElevatorOutput), SmartDashboard::GetBoolean("Toggle Elevator Safety", 0)); RightElevatorMotor.Set(output); LeftElevatorMotor.Set(output); return; } else if(!ElevatorPIDController.IsEnabled()) { RightElevatorMotor.Set(0); LeftElevatorMotor.Set(0); } } void Robot::CapElevatorSetpoint(double& setpoint) { // Prevent the setpoint from dipping below the min if(setpoint < consts::ELEVATOR_SETPOINTS[0]) { setpoint = consts::ELEVATOR_SETPOINTS[0]; } // Prevent the setpoint from exceeding the max else if(setpoint > consts::ELEVATOR_SETPOINTS[consts::NUM_ELEVATOR_SETPOINTS - 1]) { setpoint = consts::ELEVATOR_SETPOINTS[consts::NUM_ELEVATOR_SETPOINTS - 1]; } else { return; } } void Robot::PIDElevatorTest() { // Use the right trigger to manually raise the elevator and // the left trigger to lower the elevator double raiseElevatorOutput = applyDeadband(OperatorController.GetTriggerAxis( GenericHID::JoystickHand::kRightHand)); double lowerElevatorOutput = applyDeadband(OperatorController.GetTriggerAxis( GenericHID::JoystickHand::kLeftHand)); // If either triggers are being pressed if(raiseElevatorOutput != 0.0 || lowerElevatorOutput != 0.0) { // Output ranges from -1 to 1 and will act as a multiplier for the max increment double output = CapElevatorOutput(dabs(raiseElevatorOutput) - dabs(lowerElevatorOutput)); // If the elevator is in automatic mode, turn it off, and set the desired height to // the current height plus some increment if(m_isElevatorInAutoMode) { m_isElevatorInAutoMode = false; double desiredSetpoint = ElevatorPID.GetHeightInches() + consts::ELEVATOR_INCREMENT_PER_CYCLE * output; CapElevatorSetpoint(desiredSetpoint); ElevatorPIDController.SetSetpoint(desiredSetpoint); } else // If automatic mode isn't on, just increment the previous setpoint { double desiredSetpoint = ElevatorPIDController.GetSetpoint() + consts::ELEVATOR_INCREMENT_PER_CYCLE * output; CapElevatorSetpoint(desiredSetpoint); ElevatorPIDController.SetSetpoint(desiredSetpoint); } ElevatorPIDController.Enable(); return; } else // If neither of the triggers are being pressed, keep the elevator at its current height { ElevatorPIDController.SetSetpoint(ElevatorPID.GetHeightInches()); } // Automatic Mode is controlled by both bumpers if (OperatorController.GetBumperPressed(GenericHID::JoystickHand::kRightHand)) { // If elevator is lowering and the right bumper is pressed, stop elevator where it is if (m_isElevatorLowering) { 
ElevatorPIDController.SetSetpoint(ElevatorPID.GetHeightInches()); m_isElevatorLowering = false; } else { // If right bumper is being pressed for the first time, increase the desired preset by 1 if (!m_isElevatorInAutoMode) { m_targetElevatorStep = GetClosestStepNumber(); } // If right bumper has already been pressed, go to the next step. else if (m_targetElevatorStep < consts::NUM_ELEVATOR_SETPOINTS - 1) { m_targetElevatorStep++; } ElevatorPIDController.SetSetpoint(consts::ELEVATOR_SETPOINTS[m_targetElevatorStep]); m_isElevatorInAutoMode = true; m_isElevatorLowering = false; } } // The left bumper will lower the elevator to the bottom else if (OperatorController.GetBumperPressed(GenericHID::JoystickHand::kLeftHand)) { m_isElevatorInAutoMode = true; m_isElevatorLowering = true; ElevatorPIDController.SetSetpoint(consts::ELEVATOR_SETPOINTS[0]); } ElevatorPIDController.Enable(); SmartDashboard::PutBoolean("Lowering?", m_isElevatorLowering); SmartDashboard::PutBoolean("Automatic?", m_isElevatorInAutoMode); SmartDashboard::PutNumber("Elevator Height", ElevatorPID.GetHeightInches()); SmartDashboard::PutNumber("Elevator Setpoint", ElevatorPIDController.GetSetpoint()); SmartDashboard::PutNumber("Elevator Output", ElevatorPIDController.Get()); } void Robot::FullElevatorTest() { // Use the right trigger to manually raise the elevator and // the left trigger to lower the elevator double raiseElevatorOutput = applyDeadband(OperatorController.GetTriggerAxis( GenericHID::JoystickHand::kRightHand)); double lowerElevatorOutput = applyDeadband(OperatorController.GetTriggerAxis( GenericHID::JoystickHand::kLeftHand)); SmartDashboard::PutNumber("RaiseElev", raiseElevatorOutput); SmartDashboard::PutNumber("LowerElev", lowerElevatorOutput); // If either triggers are being pressed, disable the PID and // set the motor to the given speed if(raiseElevatorOutput != 0.0 || lowerElevatorOutput != 0.0) { ElevatorPIDController.Disable(); double output = CapElevatorOutput(dabs(raiseElevatorOutput) - dabs(lowerElevatorOutput), SmartDashboard::GetBoolean("Toggle Elevator Safety", 0)); RightElevatorMotor.Set(output); LeftElevatorMotor.Set(output); return; } else if(!ElevatorPIDController.IsEnabled()) { RightElevatorMotor.Set(0); LeftElevatorMotor.Set(0); } // Automatic Mode is controlled by both bumpers if (OperatorController.GetBumperPressed(GenericHID::JoystickHand::kRightHand)) { // If elevator is lowering and the right bumper is pressed, stop elevator where it is if (m_isElevatorLowering) { RightElevatorMotor.Set(0); LeftElevatorMotor.Set(0); m_isElevatorLowering = false; ElevatorPIDController.Disable(); } else { // If right bumper is being pressed for the first time, increase the desired preset by 1 if (!ElevatorPIDController.IsEnabled()) { m_targetElevatorStep = GetClosestStepNumber(); } // If right bumper has already been pressed, go to the next step. 
else if (m_targetElevatorStep < consts::NUM_ELEVATOR_SETPOINTS - 1) { m_targetElevatorStep++; } ElevatorPIDController.SetSetpoint(consts::ELEVATOR_SETPOINTS[m_targetElevatorStep]); ElevatorPIDController.Enable(); m_isElevatorLowering = false; } } // The left bumper will lower the elevator to the bottom if (OperatorController.GetBumperPressed(GenericHID::JoystickHand::kLeftHand)) { m_isElevatorLowering = true; ElevatorPIDController.SetSetpoint(consts::ELEVATOR_SETPOINTS[0]); ElevatorPIDController.Enable(); } } void Robot::AutoElevatorTest() { SmartDashboard::PutNumber("Elevator Height", ElevatorPID.PIDGet()); // // if(SmartDashboard::GetBoolean("Go to Increment", 0)) // { // ElevatorPIDController.SetSetpoint(SmartDashboard::GetNumber("Desired Increment", 0)); // } double elevatorHeight = SmartDashboard::GetNumber("Elevator Setpoint", 0); if(dabs(elevatorHeight - ElevatorPID.PIDGet()) > consts::ELEVATOR_PID_DEADBAND) { double error = elevatorHeight - ElevatorPID.PIDGet(); while(error > 1) { bool inAuto = IsTest(); if(!inAuto) { RightElevatorMotor.Set(0); LeftElevatorMotor.Set(0); return; } SmartDashboard::PutBoolean("Elev On Target?", false); //To avoid damage, use basic p-control with an added constant output speed of 0.5 error = elevatorHeight - ElevatorPID.PIDGet(); RightElevatorMotor.Set(limit(error * SmartDashboard::GetNumber("Elevator P", 0) + SmartDashboard::GetNumber("Elevator Constant", 0), SmartDashboard::GetNumber("Elevator Limit", 0))); LeftElevatorMotor.Set(limit(error * SmartDashboard::GetNumber("Elevator P", 0) + SmartDashboard::GetNumber("Elevator Constant", 0), SmartDashboard::GetNumber("Elevator Limit", 0))); SmartDashboard::PutNumber("Elevator Height", ElevatorPID.PIDGet()); } // ElevatorMotors set to a slow but constant speed to keep the elevator from falling // due to gravity RightElevatorMotor.Set(0.25); LeftElevatorMotor.Set(0.25); } EjectCube(consts::INTAKE_SPEED / 2.); SmartDashboard::PutBoolean("Elev On Target?", true); SmartDashboard::PutBoolean("Test Auto Elevator", false); // ElevatorMotors reset to 0 RightElevatorMotor.Set(0); LeftElevatorMotor.Set(0); } void Robot::IntakeTest() { // Use the B button to intake, X button to override IntakeUltrasonic if(OperatorController.GetBButton()) { RightIntakeMotor.Set(consts::INTAKE_SPEED); LeftIntakeMotor.Set(-consts::INTAKE_SPEED); } else { // Use the A button to eject if the B button is not being held if(OperatorController.GetAButton()) { RightIntakeMotor.Set(-consts::INTAKE_SPEED); LeftIntakeMotor.Set(consts::INTAKE_SPEED); } else { RightIntakeMotor.Set(0); LeftIntakeMotor.Set(0); } } } void Robot::LinkageTest() { // Use the left y-axis to do the linkage LinkageMotor.Set(OperatorController.GetY(GenericHID::JoystickHand::kLeftHand)); } void Robot::CurrentTest() { SmartDashboard::PutNumber("FL Current", FrontLeftMotor.GetOutputCurrent()); SmartDashboard::PutNumber("FR Current", FrontRightMotor.GetOutputCurrent()); SmartDashboard::PutNumber("BL Current", BackLeftMotor.GetOutputCurrent()); SmartDashboard::PutNumber("BR Current", BackRightMotor.GetOutputCurrent()); SmartDashboard::PutNumber("RIntake Current", RightIntakeMotor.GetOutputCurrent()); SmartDashboard::PutNumber("LIntake Current", LeftIntakeMotor.GetOutputCurrent()); }
CLASSIFICATION ENSEMBLE BASED ANOMALY DETECTION IN NETWORK TRAFFIC

Recently, the expansion of information technologies and the exponential growth of digital data have deepened the security and confidentiality issues in computer networks. In the Big Data era, information security has become a main direction of scientific research, and Big Data analytics is considered the main tool for solving information security problems. Anomaly detection is one of the main issues in data analysis and is widely used for detecting network threats. The potential sources of outliers can be noise and errors, events, and malicious attacks on the network. This work gives a short review of network anomaly detection methods and related work. The article proposes a more accurate and simple multi-classifier model for anomaly detection in network traffic based on Big Data. Experiments have been performed on the NSL-KDD dataset using Weka. The proposed model has shown decent results in terms of anomaly detection accuracy.

INTRODUCTION

Security issues have concerned people since the beginning of humanity. In the Big Data era, interest in security issues has increased further and has turned into a serious research direction in aspects such as politics, economics, society, demographics, the military, the environment, and so on. As shown in Hajirahimova, Big Data and information security issues can be viewed from two different aspects: the application of Big Data analytics in information security, and information security problems in Big Data technologies. In other words, these approaches are two sides of the same coin. Therefore, intelligent analytics must be used in parallel with traditional security mechanisms to improve Big Data security.

The number of threats such as cyber-terrorism, cyber-wars, and APTs (Advanced Persistent Threats) is rapidly increasing. Detecting these attacks, which greatly threaten the security of corporate computers, requires collecting large volumes of data from networks, hosts, security devices, etc.; this makes the issue of detecting anomalies in the data more pressing and requires more effective analysis methods and algorithms. Early detection of dangerous traffic on computer networks and analysis of log files are essential conditions for providing network security.

The detection of anomalies is one of the main issues in data analysis. Anomaly detection allows intervening in unusual behaviors. Precautionary prevention of attacks (including zero-day attacks) and filtering of previously unknown and malicious data are particularly important. An anomaly is understood as a pattern that is not consistent with the normal behavior defined in the data. In studies, the terms "outliers", "exceptions", "peculiarities", and "surprise" are also used as synonyms for anomaly. In other words, anomaly detection is the problem of finding patterns that do not conform to expected behavior. This problem is more noticeable in the context of Big Data. Traditional methods for detecting anomalies do not show good results on Big Data, which is characterized by features such as large volume, variety, and high velocity. Inaccurate detection or processing of anomalies directly impacts the reliability of the gained knowledge. Therefore, proper identification of anomalies is an important but not a simple matter.
In this context, Google's MapReduce framework provides an effective method for the analysis of large amounts of data. Technologies such as Cloud Computing, MapReduce, Hadoop, etc. have enough computing power to handle large-scale data processing. With the help of these technologies, it has become possible to integrate and analyze multiple sources of network data. As a result, security analytics technology based on Big Data analysis has emerged.

The purpose of the submitted study is to analyze anomalies more accurately and precisely based on Big Data analytics. Big Data analytics is the process of detecting hidden patterns, unknown correlations and other useful information in large volumes of data in order to make optimal (the best) decisions. In this context, anomaly detection is in the focus of attention both in scientific research and in application fields as a very serious problem [1,2]. Detection of anomalies is one of the application contexts of large-scale data at various information security objects. For this purpose, this paper offers a model for detecting network attacks based on Big Data analytics. In the proposed model, the object of research is network traffic, which is considered one of the Big Data sources (log files stored on the server for analysis and monitoring, etc.).

The rest of the paper is organized as follows: in the second section, existing approaches to network traffic anomaly detection are discussed. In the third section, the proposed model is described. In the fourth section, experiments and results are interpreted. Finally, the conclusion and the list of references are given.

RELATED WORK

Generally, Intrusion Detection Systems (IDS) were the first systems based on intelligent data analysis in the field of information security. The purpose of an IDS is to detect malicious traffic (anomalies) by monitoring both inbound and outbound traffic. That is, an IDS can classify events and behaviors as malicious or normal. These systems are usually based either on rules (signature-based IDS) or on anomalies. Traditional rule-based threat detection systems can only find known patterns. Their main problems are a high rate of errors (misclassification of malicious flows in traffic), the inability to operate on high-capacity networks, and the failure to identify new attacks. Anomaly-based approaches are able to detect patterns that differ from normal behavior, which is their advantage.

The study of anomaly detection in data began in the nineteenth century. Over time, special methods have been developed to detect anomalies in many application fields. Some methods consist of more general theoretical approaches. A wide overview of anomaly detection methods is given in Chandola et al.; Hodge and Austin; and Wang et al. These studies cover various aspects of the anomaly detection problem (the nature of the data; the types of anomalies, such as context anomalies and collective anomalies; data labels) and a classification of anomaly detection methods. From a methodological point of view, network anomaly detection methods are divided into two groups: stochastic and deterministic. In stochastic methods, the data is modeled probabilistically; these methods fit the data to a predictive model and assess the compatibility of new traffic with this model, with evaluation based on statistical hypothesis testing. Deterministic methods divide the feature space into two parts: "normal" and "abnormal".
Borders are defined by cluster analysis, SVM methods, etc. In terms of data, anomaly detection methods are flow-based, packet-based and window-based. Studies show that network anomaly detection methods are divided into two types: supervised and unsupervised. In supervised detection methods, a model of the normal behavior of the system or network is created based on training data. Unsupervised detection methods do not use any training data. In numerous studies, models and algorithms have been proposed based on statistical approaches, dimensionality reduction, machine learning, neural networks, Bayesian networks, entropy, rules and optimization, SVM, and so on.

The analysis of network traffic can help engineers create a reliable network, protect it from overloads, and predict dangers in advance. For this purpose, packet size, duration, IP addresses, ports, etc. are usually used in research. Adibi has commented on the essence of traffic classification methods in the context of packets, flows, and applications. In many studies, hybrid or multi-level classification models have been suggested to increase classification accuracy in anomaly detection [8,10]. The combined use of numerous data mining methods is known as an ensemble approach, and the process of learning the correlation between these ensemble techniques is known by names such as meta-learning. The researchers of Columbia University propose a mechanism that consists of several classifiers to increase the accuracy and efficiency of IDS. They show that if one method cannot detect an attack, the probability that another method detects it is high. Branitskiy and Kotenko have proposed a hybridization model of computational intelligence methods such as neural networks, neuro-fuzzy classifiers, and SVM with the purpose of effective detection of network attacks. The authors use principal component analysis to accelerate the processing of input vectors. Multi-level analysis of network traffic is one of the advantages of their model. Aljawarneh et al. propose a hybrid algorithm consisting of the J48, MetaPagging, RandomTree, REPTree, AdaBoostM1, DecisionStump, and NaiveBayes classifiers, which achieves a high degree of accuracy and allows minimization of both computation and time. In Imamverdiyev and Sukhostat, an approach based on informative features was proposed for anomaly detection in network traffic. High dimensionality, characterized by the number of features, is the main problem in the analysis of network traffic. The selection (reduction) of the main features improves the efficiency of classification; feature reduction also allows better interpretation of the results. In the cybersecurity field, the majority of current methods consist of heuristic approaches which have high computational complexity. In Pajouh et al., a machine learning approach is proposed to detect anomalous traffic in the network. The proposed two-tier classification model provides high detection speed as a result of optimal dimensionality reduction, and it allows exact detection of less common but more dangerous attacks such as U2R and R2L. In Rehak et al., a specific model based on a trust and reputation mechanism is proposed to detect network anomalies by integrating several algorithms. The model can reliably identify important events in the network (scans, DoS attacks, worms, etc.).
By applying agent technology to the analysis of network behavior, the authors reduce errors, false positives, and false negatives, and thus obtain more exact results. The researchers of Purdue University propose a new architecture model that uses Genetic Programming to define anomalous behaviors of the system for the detection of attacks. Networks are complex interacting systems comprised of several items such as routers and switches. Researchers have approached the anomaly detection problem in networks using various techniques such as artificial intelligence, machine learning, etc. Greater synergy between the networking and signal processing areas will help develop better and more effective tools for detecting network anomalies. The application of signal processing techniques to this area is still in its infancy, and it has great potential to enhance the field and thereby improve the reliability of IP networks. In that work, the authors review the use of signal processing techniques to address the problem of measuring, analyzing, and synthesizing network information to obtain normal network behavior.

Statement of the Issue

In this article, a multi-classifier approach that allows detecting anomalies in network traffic is suggested (Figure 1). The ensemble is characterized by the evaluations a_ij (i = 1..n, j = 1..k), where:
- n is the number of data points in the dataset,
- k is the number of classifiers that take part in the ensemble,
- a_ij is the evaluation of classifier j for data point i in the dataset.

The development of the proposed model consists of the following steps:
Step 1. Selection of the training and test datasets (NSL-KDD); this dataset is described in the next sections.
Step 2. First processing phase: clearing noisy data so that only useful information is stored, or applying normalization or correction methods to simplify processing.
Step 3. Building a hybrid model consisting of the J48, LogitBoost, IBk, AdaBoost, and RandomTree classifiers.
Step 4. Testing the classifiers on the data.
Step 5. Choosing a method for creating the classifier ensemble (for example, Stacking).

As shown in the scheme, the proposed method consists of two phases for anomaly detection. In the first phase, training is performed; in the second phase, the algorithm is tested for anomaly detection.

A. Dataset - NSL-KDD

One of the main problems faced by researchers in detecting network anomalies is the lack of open datasets. To address this problem, the importance of benchmark test data for testing IDS systems was recognized in the 1990s, and the DARPA datasets appeared. It should be noted that at present many datasets are available to researchers for testing IDS systems. Each record in the NSL-KDD training dataset contains 42 attributes (Table 1) that are marked as normal or anomalous and reflect different properties. It should be noted that the TCP, UDP, and ICMP protocols are used in the NSL-KDD dataset. Detailed information about the data, attributes, names, descriptions and so on is given in NSL-KDD; Dhanabal and Shantharajah; and Revathi and Malathi.

B. WEKA (Waikato Environment for Knowledge Analysis)

It is important to have the right tools for the intelligent analysis of data. The first version of the Weka open-source software was developed in the Java programming language at Waikato University in New Zealand in 1993 to provide data analysis and machine learning algorithms. Because it is written in Java, it can be used on any computer platform.
WEKA offers researchers preprocessing tools, multiple classification and clustering methods, regression methods, and visualization of results. Over the years the software has been developed further, and the most up-to-date capabilities have been added. It should be noted that the latest version, 3.8.1, can be applied to Big Data.

C. Classification Methods

Decision Tree (or J48) is a predictive machine learning method that decides the target value of a new example based on different features of the available information. Decision trees create a hierarchical partitioning of the data, which relates the different partitions at the leaf level to the different classes. The hierarchical partitioning at each level is created with the use of a split criterion. The split criterion may either use a condition (or predicate) on a single attribute, or it may contain a condition on multiple attributes. The overall approach is to recursively split the training data so as to maximize the discrimination among the different classes over different nodes. In Weka, an algorithm for creating decision trees is implemented under the name J48.

KNN (or IBk): the non-parametric "nearest neighbor" classifier is also known as "instance-based" learning. K-nearest neighbors is a simple classification algorithm based on similarity or distance calculation between instances. To classify an unknown instance represented by a feature vector as a point in the feature space, the KNN classifier calculates the distances between that point and the points in the training dataset. To label each test sample, the following operations must be performed sequentially:
- Calculate the distance to each of the training sample objects;
- Select the k training sample objects with the minimal distance;
- Assign to the object being classified the class most often encountered among the k nearest neighbors.
Usually, the Euclidean distance function is the most widely used distance metric.

AdaBoost, short for adaptive boosting, was introduced in 1995 by Freund and Schapire. Boosting originated from the question of whether a set of weak classifiers could be converted into a strong classifier. It can be used in conjunction with many other types of learning algorithms to improve performance. The output of the other learning algorithms ('weak learners') is combined into a weighted sum that represents the final output of the boosted classifier. That is, it focuses on classification problems and is aimed at converting a set of weak classifiers into a strong one. AdaBoost is adaptive in the sense that subsequent weak learners are tweaked in favor of those instances misclassified by previous classifiers. AdaBoost is sensitive to noisy data and outliers. In some problems, it can be less susceptible to overfitting than other learning algorithms.

LogitBoost is also a boosting classification algorithm. It was formulated by professors Jerome Friedman, Trevor Hastie, and Robert Tibshirani at Stanford University. LogitBoost and AdaBoost are close to each other in the sense that both perform additive logistic regression. The difference is that AdaBoost minimizes the exponential loss, whereas LogitBoost minimizes the logistic loss.

A random tree is a tree or arborescence that is formed by a stochastic process. Types of random trees include the random binary tree, the random recursive tree, the random forest, etc.
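The paper builds its ensemble in Weka; purely as an illustration, the sketch below shows an analogous stacking ensemble in Python with scikit-learn. The scikit-learn learners are substitutes, not the paper's exact classifiers (DecisionTreeClassifier for J48, KNeighborsClassifier for IBk, GradientBoostingClassifier as a logistic-loss stand-in for LogitBoost, ExtraTreeClassifier for RandomTree), the RBF-kernel SVM meta-classifier follows the paper's Step 5, and the file name and column layout are assumptions.

# Illustrative sketch only: a scikit-learn analogue of the Weka stacking ensemble
# described above (J48, IBk, AdaBoost, LogitBoost, RandomTree -> SVM-RBF meta-classifier).
# The CSV file name and the "label" column are assumptions, not the paper's exact setup.
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import AdaBoostClassifier, GradientBoostingClassifier, StackingClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier, ExtraTreeClassifier

# Assumed CSV export of NSL-KDD: 41 feature columns plus a binary "label" column
# whose values are "normal" or "anomaly".
data = pd.read_csv("nsl_kdd_train.csv")
X, y = data.drop(columns=["label"]), data["label"]

categorical = ["protocol_type", "service", "flag"]           # symbolic attributes
numeric = [c for c in X.columns if c not in categorical]     # remaining attributes

# Step 2: simple preprocessing (one-hot encode symbols, normalize numeric features).
preprocess = ColumnTransformer([
    ("cat", OneHotEncoder(handle_unknown="ignore"), categorical),
    ("num", StandardScaler(), numeric),
])

# Step 3: base classifiers standing in for the Weka learners named above.
base_learners = [
    ("tree_j48_like", DecisionTreeClassifier()),             # ~ J48
    ("knn_ibk_like", KNeighborsClassifier(n_neighbors=5)),   # ~ IBk
    ("adaboost", AdaBoostClassifier()),                       # ~ AdaBoostM1
    ("logitboost_like", GradientBoostingClassifier()),        # ~ LogitBoost (logistic-loss boosting)
    ("random_tree_like", ExtraTreeClassifier()),               # ~ RandomTree
]

# Step 5: stacking with an RBF-kernel SVM as the meta-classifier.
model = Pipeline([
    ("prep", preprocess),
    ("stack", StackingClassifier(estimators=base_learners,
                                 final_estimator=SVC(kernel="rbf"),
                                 cv=5)),
])

model.fit(X, y)
print("training accuracy:", model.score(X, y))

The stacking meta-classifier is trained on cross-validated predictions of the base learners, which is the "learning the correlation between ensemble techniques" (meta-learning) idea discussed in the related work.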
EXPERIMENTAL RESULTS AND DISCUSSION

In this section, the experiments carried out and their results are summarized. Experiments were performed on a computer with the Windows 8.1 (64-bit) operating system, an Intel(R) Core(TM) i5-2400 processor, and 4 GB of RAM; the NSL-KDD training and testing datasets were used to detect anomalies in network traffic. These files contain 42 attributes, including "protocol_type", "service", "flag", "src_bytes", "dst_bytes", "land", "wrong_fragment", "urgent", "hot", "dst_host_count", etc.

Interpretation of Classification Results

The result of the classification process is a confusion matrix. The dimension of the matrix may be two or more, according to the number of classes. In this research a two-dimensional matrix is obtained, since there are two classes (normal, anomaly); its structure is shown in Table 2. Knowing the values of the four parameters of this matrix (TP, TN, FP, FN), we can also calculate the precision, recall, F-measure, and accuracy assessments.

Precision is the proportion of correctly predicted positive observations to the total number of positive predictions: Precision = TP / (TP + FP). Recall is the proportion of correctly predicted positive observations to all actual positive observations: Recall = TP / (TP + FN). Accuracy is the percentage of test set samples that are correctly classified by the model; in other words, it is the quotient of the number of correctly predicted observations to the total number of observations. Since correctly predicted observations may come from both the positive and negative classes, TP and TN are summed to count them, and the total number of observations is the sum of all four parameters: Accuracy = (TP + TN) / (TP + TN + FP + FN). The F-measure is the harmonic mean of precision and recall: F = 2 * Precision * Recall / (Precision + Recall).

The SVM with a Radial Basis Function kernel was taken as the meta-classifier in the classifier ensemble. The result can be interpreted as follows. As can be seen in Table 3, the detection accuracy of the proposed approach is higher than that of the other methods for all metrics. The anomaly detection accuracy of the proposed model is over 83 percent. FPR errors are low when the precision indicator is high, and FNR errors are low when the recall indicator is high; it is therefore difficult for both precision and recall to be high at the same time, because in most cases when recall increases, precision decreases, and vice versa. The F-measure shows whether the model is accurate: this indicator is high only when both precision and recall are high enough. The ROC (Receiver Operating Characteristic) curve presented in Figure 2 was used as a visualization tool; for this purpose, Matlab software was used. In the curve, the x-axis reflects the false positive rate and the y-axis reflects the true positive rate.
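To make the metric definitions above concrete, the following minimal sketch computes precision, recall, F-measure, accuracy, and the false positive rate from a two-class confusion matrix. The TP/TN/FP/FN counts in the example call are invented for illustration only; they are not results from Table 3.

# Illustrative only: metrics derived from a 2-class confusion matrix as defined above.
# The counts passed at the bottom are made-up example numbers, not the paper's results.
def classification_metrics(tp: int, tn: int, fp: int, fn: int) -> dict:
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)                     # also called detection rate / TPR
    f_measure = 2 * precision * recall / (precision + recall)
    accuracy = (tp + tn) / (tp + tn + fp + fn)
    fpr = fp / (fp + tn)                         # false positive rate (x-axis of the ROC curve)
    return {"precision": precision, "recall": recall,
            "f_measure": f_measure, "accuracy": accuracy, "fpr": fpr}

print(classification_metrics(tp=9500, tn=8500, fp=900, fn=1100))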
import { getDeliveryClientWithJson } from '../../../setup';
import { Elements, IContentItem, Responses, pascalCasePropertyNameResolver } from '../../../../../lib';
import * as warriorJson from './base-case-property-resolver.spec.json';

type MockWarrior = IContentItem<{
    FirstName: Elements.TextElement;
    LastName: Elements.TextElement;
    Age: Elements.NumberElement;
    Residence: Elements.TextElement;
    City: Elements.TextElement;
}>;

describe('Pascal case property resolver', () => {
    let response: Responses.IViewContentItemResponse<MockWarrior>;

    beforeAll(async () => {
        response = await (
            await getDeliveryClientWithJson(warriorJson, {
                propertyNameResolver: pascalCasePropertyNameResolver,
                projectId: 'X'
            })
                .item<MockWarrior>('x')
                .toPromise()
        ).data;
    });

    it(`checks element is assigned #1`, () => {
        expect(response.item.elements.FirstName.value).toEqual(warriorJson.item.elements.first_name.value);
    });

    it(`checks element is assigned #2`, () => {
        expect(response.item.elements.LastName.value).toEqual(warriorJson.item.elements.last__name.value);
    });

    it(`checks element is assigned #3`, () => {
        expect(response.item.elements.Age.value).toEqual(warriorJson.item.elements.age_.value);
    });

    it(`checks element is assigned #4`, () => {
        expect(response.item.elements.Residence.value).toEqual(warriorJson.item.elements._residence.value);
    });

    it(`checks element is assigned #5`, () => {
        expect(response.item.elements.City.value).toEqual(warriorJson.item.elements.____city___.value);
    });
});
/* Find segment at a given physical extent in a PV */
struct pv_segment *find_peg_by_pe(const struct physical_volume *pv, uint32_t pe)
{
	struct pv_segment *peg;

	dm_list_iterate_items(peg, &pv->segments)
		if (pe >= peg->pe && pe < peg->pe + peg->len)
			return peg;

	return NULL;
}