// handleMsgSetDefaultWallet handles a MsgSetDefaultWallet message, marking the given wallet as its account's default
func (m *Module) handleMsgSetDefaultWallet(msg *typeswallets.MsgSetDefaultWallet) error {
if err := m.walletsRepo.SaveDefaultWallets(msg); err != nil {
return err
}
targetWallet, err := m.walletsRepo.GetWallets(filter.NewFilter().SetArgument(dbtypes.FieldAddress, msg.Address))
switch {
case err != nil:
return err
case len(targetWallet) != 1:
return typeswallets.ErrInvalidAddressField
}
account, err := m.accountsRepo.GetAccounts(filter.NewFilter().SetArgument(dbtypes.FieldAddress, targetWallet[0].AccountAddress))
switch {
case err != nil:
return err
case len(account) != 1:
return typesaccount.ErrInvalidHashField
}
for _, walletAddr := range account[0].Wallets {
w, err := m.walletsRepo.GetWallets(filter.NewFilter().SetArgument(dbtypes.FieldAddress, walletAddr))
switch {
case err != nil:
return err
case len(w) != 1:
return typeswallets.ErrInvalidAddressField
}
if !w[0].Default {
continue
}
w[0].Default = false
if err := m.walletsRepo.UpdateWallets(w...); err != nil {
return err
}
break
}
targetWallet[0].Default = true
return m.walletsRepo.UpdateWallets(targetWallet[0])
}
The present and future of cough counting tools
The widespread use of cough counting tools has, to date, been limited by a reliance on human input to determine cough frequency. However, over the last two decades, advances in digital technology and audio capture have reduced this dependence. As a result, cough frequency is increasingly recognised as a measurable parameter of respiratory disease. Cough frequency is now the gold standard primary endpoint for trials of new treatments for chronic cough, has been investigated as a marker of infectiousness in tuberculosis (TB), and has been used to demonstrate recovery in exacerbations of chronic obstructive pulmonary disease (COPD). This review discusses the principles of automatic cough detection and summarises the key cough counting technologies currently and recently used in clinical research. It also makes some predictions on future directions in the field based on recent developments. It seems likely that newer approaches to signal processing, the adoption of techniques from automatic speech recognition, and the widespread ownership of mobile devices will help drive forward the development of real-time, fully automated ambulatory cough frequency monitoring over the coming years. These changes should allow cough counting systems to transition from their current status as a niche research tool in chronic cough to a much more widely applicable method for assessing, investigating and understanding respiratory disease.
patient's perception of actual coughing events, rather than a direct assessment of cough itself. Subjective and objective measures show only moderate correlation at best (12).
Objective measurement aims to offer an impartial quantification of the physiological and pathological phenomenon of cough. Although it is possible to measure different physical characteristics, including force or intensity (13,14), and acoustic properties of cough (15), the most widely measured objective variable for assessing cough is its frequency.
There are several reasons why cough frequency measurements might be useful. Chronic cough is a common disorder, affecting approximately 10% of the population (16), and is associated with significant morbidity (17,18). Cough counts in individual patients are an objective marker of cough severity, variation over time may suggest triggers or aetiologies, and repeated measures following treatments can assess their efficacy. With the wider use of cough monitoring in research, objective cough counts are now becoming primary endpoints in clinical trials of anti-tussive therapies (19,20).
Cough frequency measurement may also be useful for monitoring treatment of other respiratory diseases (21), assessing infectiousness in tuberculosis (TB) (22), detecting early signs of exacerbations of chronic respiratory disease (23), and possibly for screening for the early stages of potentially treatable diseases including lung cancer and TB.
For the time being, at least to our knowledge, cough frequency monitoring has not been incorporated into routine clinical practice and remains a research tool. This review will provide an up-to-date overview of cough counting tools currently and recently used in research, discuss technological aspects, and speculate on possible future developments in the field.
Due to the scope of this review and the constantly evolving nature of the field, it does not aim to be comprehensive, but rather to offer the reader insights by focusing on key principles, and on technology and devices which have led to significant advances in the understanding and management of cough.
Defining cough
Cough is characterised by three stages. During the first, inspiratory stage, air is drawn into the lungs. This is followed by the compressive stage characterised by forced expiratory effort against the closed glottis. Finally, during the expulsive stage there is opening of the glottis and rapid outflow of air. This sudden release of rapid and turbulent expiratory airflow is responsible for the characteristic sound of a cough which essentially defines it (24,25). This third stage itself usually has three component phases which comprise the cough sound, as described in Figure 1 and below.
Coughs often occur close together in clusters, which may be described as epochs, bouts, peals or attacks. During these, the initial inspiratory phase is followed by a series of further compressive phases associated with glottal closure, sometimes with additional inspirations (24). A bout or epoch is defined as a cluster of two or more cough sounds, separated from the next by an interval of no more than 2 seconds (24). Different units have been proposed for cough counting, including individual cough sounds, cough bouts, and time spent coughing (26). No measure is demonstrably superior, but numbers of cough sounds and time spent coughing are very closely correlated (27). Individual cough sounds, either occurring in isolation or as part of a cluster, are probably therefore the most intuitive basic units of cough (25,28).
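To make these definitions concrete, the short sketch below (our illustration in Python, not taken from any published monitor) groups detected cough-sound timestamps into clusters using the 2-second separation rule, and treats clusters of two or more sounds as bouts while counting lone coughs separately.

# Illustrative only: group cough-sound onset times (in seconds) into clusters
# separated by more than 2 seconds; clusters of >=2 sounds count as bouts.
def group_cough_sounds(cough_times, max_gap_s=2.0):
    clusters = []
    for t in sorted(cough_times):
        if clusters and t - clusters[-1][-1] <= max_gap_s:
            clusters[-1].append(t)   # within 2 s of the previous sound: same cluster
        else:
            clusters.append([t])     # otherwise start a new cluster
    return clusters

clusters = group_cough_sounds([10.0, 10.8, 11.5, 40.2, 41.0, 95.3])
bouts = [c for c in clusters if len(c) >= 2]
print(len(bouts), "bouts and", sum(len(c) == 1 for c in clusters), "lone cough")  # 2 bouts and 1 lone cough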
Establishing a reference standard for counting coughs
The assumed gold standard method for counting coughs, against which potential automated cough counters are evaluated, is the 'manual' counting of cough sounds from near-patient audio recordings. Cough counts performed by pairs of experienced cough researchers are reported to be highly consistent (29,30). One study, though, has specifically set out to test the validity of cough counting by ear amongst a larger group (28).
Fifteen doctors untrained in counting coughs were asked to individually listen to audio recordings, lasting around 15 minutes and containing coughs from patients with respiratory diseases. There was very close inter- and intra-observer agreement of the reported cough counts. The recordings contained a mixture of lone coughs and coughing bouts, and no specific instructions were given as to how coughs should be counted. The consistently reported values for cough frequency corresponded to the total numbers of individual cough sounds, providing further good justification not only for counting by ear as the reference standard for determining cough frequency, but also for using discrete cough sounds as the basic unit for cough counting (28).
A brief history of cough frequency monitors
Attempts at quantifying cough frequency over prolonged periods began in the 1950s (31). In 1964 Woolf and Rosenberg were able to demonstrate a reduction in cough frequency with anti-tussive therapy by counting coughs recorded from a microphone above the head of the bed connected through to a tape recorder (32). Recording was triggered by sound from the microphone and continued to record for 5 seconds after the sound had stopped, allowing it to record up to 24 hours of observations on 2 hours of tape. Manual counts of the audio recordings were then undertaken by the investigators. A similar system was developed by Loudon and colleagues in the 1960s (33,34). Again, due to limitations of technology at the time, equipment was bulky, and patients confined to a single room for the duration of monitoring, which in this case was up to 10 hours. The final recordings, also analysed by ear, similarly represented only certain portions of the full recording period, during which sounds meeting pre-specified amplitude and frequency criteria had triggered the audio capture apparatus. This inability to record for the full duration of the monitoring period presumably led to the omission of a proportion of coughing events with both systems.
In 1988 Salmi et al. made the first attempt at the automation of cough frequency monitoring (35). The researchers' apparatus recorded cough sounds via a microphone and body movements with a static charge-sensitive mattress. Coughs had to breach specified acoustic and movement thresholds to be automatically identified. The method was evaluated by comparison to cough counts from a researcher simultaneously observing the patient in real time during the recording period. In 7 patients, the machine detected 809 cough events, of which 794 were true positives, showing a sensitivity of 99.0% and specificity of 98.1%. The method was, however, extremely restrictive to subjects. They were required to remain sitting or lying in bed isolated in a single hospital room, told to avoid sudden movements and loud noises, and to remain facing away from the pillow. Presumably in part because of this, the system does not seem to have been developed any further.
The 1990s and early 2000s saw advances in technology which enabled the development of ambulatory devices (36). MP3 recording, digital storage, miniaturisation of microphones, and developments in battery technology facilitated the ability to record and capture high quality data continuously over prolonged periods (37).
Processing the recorded information, however, remained the main limiting step owing to a reliance on manual assessment. Counting coughs by ear from prolonged audio sequences is not only laborious and time-consuming, but auditory fatigue may lead to miscounting and errors; ideally, a subset of recordings should be counted twice, by more than one observer, for consistency and quality control. Any steps towards full automation of cough counting would therefore be highly attractive.
Principles of automatic cough detection
Most previous studies focusing on cough detection have used a conventional approach to audio signal processing ( Figure 2), applying techniques used in automatic speech recognition. The core of this strategy involves three steps: silence removal, feature extraction and classification. However, different methods have been proposed for these steps, and there is currently no standardised methodology for automatic cough detection.
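As a purely illustrative sketch of this strategy, the skeleton below (in Python, with placeholder stages rather than any particular published algorithm) shows how the three steps fit together; concrete options for each stage are discussed in the following sections.

# Illustrative skeleton of the conventional pipeline: silence removal ->
# feature extraction -> classification. Each stage is a caller-supplied function.
def detect_coughs(audio, sample_rate, segmenter, featuriser, classifier):
    events = []
    for segment in segmenter(audio, sample_rate):      # silence removal / segmentation
        features = featuriser(segment, sample_rate)    # e.g. MFCC and spectral features
        if classifier(features):                       # cough vs. non-cough decision
            events.append(segment)
    return events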
Cough signal capture
The most studied signal for cough evaluation is sound, acquired by means of microphones. Other investigated modalities include electromyography (EMG), electrocardiography (ECG), nasal thermocouple sensors, effort belts, and accelerometry (21), used individually or in combination (38-42). Microphones can be classified as contact and non-contact (43,44). Non-contact microphones are either worn on the outer clothing or placed in the subject's vicinity and detect fluctuations in air pressure, which are converted into electrical signals by transduction. Contact microphones are attached to the skin surface and use piezoelectric transducers to sense audio vibrations through direct physical contact. Although less sensitive to ambient noise than non-contact microphones, their high sensitivity makes them prone to noise from movement artefact (45).
An array or combination of microphones may enhance the spatial filtering selectivity of recordings and improve cough discrimination, particularly in noisy environments (46,47).
A cough sound can be described as a non-stationary signal and, as discussed, separated into three component phases: the explosive phase, intermediate phase, and voiced phase (Figure 1). A cough lasts on average 350 ms, with a first peak of mean frequency c. 400 Hz, a secondary peak of highest continuous frequency c. 4,000 Hz (48), and frequency components of sound spread up to 20 kHz (49). To characterise cough events properly, the complete capture of all details from the audio signal is necessary. A range of sampling frequencies for the acquisition of the audio signal has been suggested, from 8 kHz (50) to 48 kHz (51). The frequency response of the microphone must be within the sound frequency range of the coughing events. In addition, the selection of a sampling rate must consider the highest frequency component of the cough sounds; as stated by the Nyquist sampling theorem, components above half the sampling rate must be filtered out to avoid aliasing (52). The sampling rate will impact the volume of data acquired; lower rates reduce hardware and data storage requirements, as well as the time needed to carry out automated analysis, all of which are highly relevant for the development of practical applications.
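As a worked illustration of the Nyquist constraint described above (our sketch, assuming the SciPy library and an arbitrary 48 kHz to 16 kHz conversion), the snippet below low-pass filters a signal just below half of the target sampling rate before downsampling, which is the anti-aliasing step the theorem requires.

import numpy as np
from scipy import signal

def downsample_with_antialiasing(x, fs_in=48_000, fs_out=16_000):
    # Low-pass filter slightly below the new Nyquist frequency (fs_out / 2)
    # so that no component above half the output rate survives (anti-aliasing).
    b, a = signal.butter(8, 0.45 * fs_out, btype="low", fs=fs_in)
    x_filtered = signal.filtfilt(b, a, x)
    return x_filtered[:: fs_in // fs_out]  # assumes fs_in is a multiple of fs_out

t = np.arange(48_000) / 48_000
x = np.sin(2 * np.pi * 400 * t)   # 400 Hz tone, near the first spectral peak of a cough
print(len(downsample_with_antialiasing(x)))  # 16000 samples, i.e. one second at 16 kHz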
Windowing and silence removal
Cough sound signals are commonly split into signal segments using a moving window, so that all subsequent analyses are performed on each segment (53). Prior to extracting information from the cough signal, preprocessing of the audio data is necessary with the aim of removing sources of noise that obscure its evaluation. In general, cough events are segmented and separated from other noise sources, a method known as silence removal (54). Silence removal can be performed manually or using automated methods based on standard deviation, short-term energy, and zero-crossing rate (54). Standard deviation is a measure of the dispersion of signal segments: a lower standard deviation indicates segments of low activity, while a higher standard deviation indicates segments with potential cough activity. Short-term energy represents the signal power: low signal power is related to silence periods, while high signal power indicates potential cough activity. Zero-crossing rate represents the number of times the signal crosses zero; the higher the zero-crossing rate, the greater the amount of noise.
As suggested by Cohen-McFarlane et al., one step forward to improve silence removal and segmentation could be to implement an adaptive approach that improves the ability to separate background noise from audio activity (54). Nonetheless, these silence removal methods have limitations: they work best in quiet ambient conditions and lead to poorer segmentation when signal-to-background levels are low.
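The sketch below (our illustration, not a published method) computes short-term energy and zero-crossing rate over moving frames and flags high-energy frames as candidate cough activity; the frame length and threshold are arbitrary choices for demonstration.

import numpy as np

def frame_features(x, frame_len=1024, hop=512):
    # Short-term energy and zero-crossing rate for each moving window of the signal.
    energies, zcrs = [], []
    for start in range(0, len(x) - frame_len + 1, hop):
        frame = x[start:start + frame_len]
        energies.append(np.mean(frame ** 2))                       # short-term energy
        zcrs.append(np.mean(np.abs(np.diff(np.sign(frame))) > 0))  # zero-crossing rate
    return np.array(energies), np.array(zcrs)

def candidate_cough_frames(x, energy_factor=3.0):
    # Frames whose energy is well above the median are kept as potential cough
    # activity; everything else is treated as silence or background and removed.
    energy, _ = frame_features(x)
    return energy > energy_factor * np.median(energy)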
Feature extraction
Mel-frequency cepstral coefficients (MFCC) and their variations have been the most widely used features for cough detection (41,55-65). MFCC, which are widely used in automatic speech recognition, represent the envelope of the short-term power spectrum of a sound signal. Spectral shape features may also help to differentiate cough from other sounds, particularly speech; compared to speech, cough sounds have a closer similarity to noise, and therefore have a wider spectrum.
Cough detection algorithms to date have included several distinct spectral features. Spectral flatness (41,50,66) represents how flat, or noise-like, the spectrum of a signal is; spectral centroid (41,50,66,67) is the weighted mean of the spectrum, usually higher in cough sounds than in other sounds; formant frequencies (50,56,59,64) are spectral peaks at the resonant frequencies associated with cough generation; and spectral kurtosis (59,64,67-69) measures how peaked the spectrum is.
Other features have been used for cough detection.
These may include: the non-Gaussianity score (56,57,59,64,70), quantifying the deviation of a signal from a Gaussian model, the value of which is typically high in cough sounds; log-energy (59,61,64), relating to the amplitude of cough signals; and Hu moments, a technique of weighted averaging widely used in image processing and recently proposed in the signal processing field for speech emotion recognition (67,71,72). Usually, these and other features are combined to form the input data set for classification. However, there is no consensus standard for the optimum set of acoustic features for cough detection.
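As an illustration of how such a feature vector might be assembled (our sketch, assuming the librosa and SciPy libraries; the feature set and parameters are not those of any specific study cited above), the snippet below combines averaged MFCCs with a few of the spectral-shape features discussed.

import numpy as np
import librosa
from scipy.stats import kurtosis

def extract_features(y, sr):
    # y: mono audio segment, sr: sampling rate. Returns one feature vector per segment.
    mfcc = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=13)          # short-term spectral envelope
    centroid = librosa.feature.spectral_centroid(y=y, sr=sr)    # weighted mean of the spectrum
    flatness = librosa.feature.spectral_flatness(y=y)           # how noise-like the spectrum is
    spectrum = np.abs(np.fft.rfft(y))
    return np.concatenate([
        mfcc.mean(axis=1),                  # 13 averaged MFCCs
        [centroid.mean(), flatness.mean(),
         kurtosis(spectrum),                # spectral kurtosis: how peaked the spectrum is
         np.log(np.sum(y ** 2) + 1e-12)],   # log-energy of the segment
    ])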
Classification
Classification techniques aim to categorise sounds into cough and non-cough events. Artificial neural networks (ANN), algorithms that attempt to simulate the behaviour of the human brain, have been applied to differentiate between cough and non-cough events (56), between cough segments, swallow signals, rest states and different non-cough artefacts (73), and between cough, speech and noise (74). Recently, with advances in deep learning, new approaches to cough detection have been proposed (55,62,75). These approaches do not require feature extraction prior to classification: the short-time Fourier transform or spectrogram of the cough sound signal is passed directly to a deep neural network, within which feature extraction is performed automatically, thus simplifying the processing of cough sound signals.
The hidden Markov model (HMM) is a statistical technique used successfully for speech recognition. HMMs represent the spectral properties of a time-varying pattern of cough and have been employed for the automatic detection of cough events in ambulatory patients with respiratory diseases (65). In addition, hybrid models combining ANN with HMM (74), and deep neural networks (DNN) with HMM (62), have been proposed to enhance the performance of cough detection by taking into account temporal variations in the cough signal.
Logistic regression is a predictive classification algorithm that assigns observations to a discrete (e.g., binary) set of classes. This technique has been used for the diagnosis of pertussis by evaluating cough and whoop sounds (76), and to separate cough and non-cough events (50,77). Other interesting applications make use of the support vector machine, an algorithm that is well suited to the analysis of small data sets and that has been employed to improve an automatic croup diagnosis system (78).
Linear discriminant analysis (LDA) is a dimensionality-reduction technique that increases the separation between classes, enabling classification. LDA has been employed to obtain the best possible separation between cough and non-cough events (56).
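To show how any of these classifiers slots into the pipeline, the snippet below is a minimal sketch using scikit-learn on synthetic placeholder data; logistic regression here simply stands in for whichever classifier is chosen, and the feature dimension matches the illustrative extractor above. It trains a binary cough/non-cough model and reports sensitivity and specificity, the metrics quoted for the devices discussed later.

import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import recall_score

# Placeholder data: one 17-dimensional feature vector per segment (e.g. from
# extract_features above) and a label of 1 for cough, 0 for non-cough.
rng = np.random.default_rng(0)
X = rng.normal(size=(500, 17))
y = rng.integers(0, 2, size=500)

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
clf = LogisticRegression(max_iter=1000).fit(X_train, y_train)

pred = clf.predict(X_test)
sensitivity = recall_score(y_test, pred)               # true positive rate for cough segments
specificity = recall_score(y_test, pred, pos_label=0)  # true negative rate for non-cough segments
print(f"sensitivity={sensitivity:.2f}, specificity={specificity:.2f}")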
The accelerated progress being made in the field of classification algorithms should allow more accurate and efficient classification between cough and non-cough events. The associated reduction in the computational cost could facilitate implementation in ambulatory scenarios, for example with incorporation into widely-available mobile devices as discussed below.
A major limitation when comparing different methods for automatic cough detection is that the datasets used in different studies are not the same, with main differences in type and position of microphones, recording conditions, study subjects and types of non-cough sounds included. Since these aspects influence the performance evaluation of methods, it is difficult to directly compare different approaches to automatic cough detection.
Modern cough monitors
The ideal ambulatory cough monitoring system would be easily portable, compact, minimally intrusive to the patient, and probably operate over no less than a 24-hour period to take account of diurnal variations in cough frequency (79). Such a device should reliably and consistently detect all coughs and distinguish them from all non-cough sounds, other respiratory movements, and ambient noise (i.e., high sensitivity and specificity), operate in a patient's own environment, function across a range of subjects, types of cough sound, in health, and in different diseases, and provide a fully automated analysis of the collected data. Ideally, cough monitors should also work in noisy environments and not mistake the coughs of other individuals for those of the subject of interest.
As discussed, the miniaturisation of electronic devices and the digitalisation of recording technology have led to wearable cough monitors capable of recording continuously for 24 hours or longer during daily activities. Despite various recent attempts at developing automated or semi-automated cough frequency monitors, some of which are described below, only two systems have been widely adopted in cough research: the Leicester Cough Monitor (LCM) and the VitaloJAK™ (37).
The dextromethorphan trials
In 2001 Pavesi et al. published a meta-analysis of 6 trials investigating the antitussive effect of dextromethorphan. Amongst 710 patients, cough reduction was demonstrated using cough frequency data collected by a portable computerised cough acquisition and analysis system, in the patient's own environment. The device consisted of a contact microphone, attached to the suprasternal notch, detecting audio and vibration signals. Data was collected by a frequency modulation transmitter, worn in a belt pouch, and sent wirelessly to the hardware within the patient's home. Subjects could move freely within a 100-metre radius of the computer collecting the data. The system employed fully computerised acquisition of data but relied on manual counting of audio and visual displays.
Three hours of continuous cough recording were undertaken after treatment was initiated in each subject. Cough bouts, components, effort, intensity and latency were all measured. The antitussive effect of a single dose of dextromethorphan was demonstrated, and consistent results were achieved by the cough counting system, showing for the first time the feasibility of portable cough monitors for evaluating treatment. Additionally, the study showed that cough frequency, rather than intensity, was the more responsive measure (80).
The Lifeshirt ®
The Lifeshirt ® system was a multimodality automated cough counting tool comprising respiratory inductance plethysmography (RIP) for the non-invasive measurement of ventilatory variables, an accelerometer, single channel ECG, and a unidirectional throat microphone. The device was evaluated in one study of eight subjects with chronic obstructive pulmonary disease (COPD) against manual counts from video surveillance. The sensitivity and specificity of the device for counting coughs was reported as 78.1% and 99.6%, respectively (81). There is no other published validation data; the company developing the product ceased trading and any further work looks unlikely. Nevertheless, this technology has influenced other products which are discussed below.
The RBC-7, Logan Sinclair LR100 and LR102
The RBC-7 recorded surface EMG and audio cough signals, with a capacity for over 48 hours of data, to an ambulatory device. Additional data from an ECG and an accelerometer supplied information on the subject's activity. Validation included data from 20 subjects and was compared to manual counting from tape recordings. No significant difference in the number of single coughs recorded by each system was detected (correlation coefficient = 0.996) (82). However, the system was not automated and therefore required full manual counts.
To overcome this operator dependence, modification with preset algorithms for data analysis was applied and the device re-named the LR100. It was validated in 14 children during exacerbations of cystic fibrosis (83). Further work in 2003 showed the feasibility of objective cough monitoring in younger infants and children. The system was shown to be well-tolerated and had a mean sensitivity of 81% for detecting coughs in comparison to human counts of video recordings (84).
The final update was to the LR102. This comprised 3 EMG sensors and a contact sound transducer. The EMG electrodes were placed across the chest: in the sixth right intercostal space, the left mid-clavicular region, and the epigastrium. The sound sensor was placed in the second left intercostal space. Data analysis was fully automated and off-line. Validation examined data collected from 10 adult patients with chronic cough. Cough frequency recorded by the LR102 and manual counting of video recordings were well correlated: r=0.87 for the number of cough episodes/hour and r=0.89 for the number of single coughs/hour. However, the LR102 overestimated cough frequency. The mean difference between the meter and manual counts was 3.8 for cough episodes per hour (P=0.04) and 12.5 for single coughs per hour (P<0.01). This overestimation was due in large part to difficulties of the automated system in distinguishing between cough episodes and other noise. The shortcomings of the device have not been addressed and there have been no attempts to develop it further (85).
Pulmotrack-CC TM
The fully automated Pulmotrack-CC™ consisted of two contact microphones and a pneumogram belt. In a validation study by the developers it recorded tracheal and chest wall sounds, ambient sounds and chest wall motion in 12 healthy volunteers coughing voluntarily in 5 different positions: lying supine, sitting, sitting with high-level ambient noise, walking and climbing stairs (5 minutes per position, totalling 25 minutes per subject). A cough monitoring algorithm was applied, and the tool validated against cough counting by the developers. The device was reported to have a specificity of 94% and sensitivity of 96%. However, during stair climbing specificity dropped to 87% with a sensitivity of 97%. Correlation with manual counts was strong, r=0.94 (86).
However, subsequent independent assessment of the Pulmotrack-CC™ was undertaken using recordings lasting up to 20 hours from 10 patients with chronic cough due to different chronic respiratory conditions. In this context the system was demonstrated to have poor agreement with cough counting by a human investigator, with a sensitivity of only 26% compared to coughs identified by ear (28). Until this lack of consistency is addressed, the system cannot be reliably used.
The Hull Automatic Cough Counter (HACC)
The HACC records sound data from a wearable microphone over 24 hours. The signal is analysed to identify sound, and periods of silence are then omitted. The HACC was developed in 33 subjects with chronic cough, 23 of whom contributed data to determine reference cough features, with the remaining 10 subjects used for validation. The HACC was able to significantly reduce counting time compared to manual systems, taking approximately 1 minute 35 seconds to provide a count for 1 hour of data. However, the false positive rate of the automated system was high at almost 20% (sensitivity 80% and specificity 96%), due to the inability of the system to distinguish surrounding coughs from those produced by the subject (53). Comparing the HACC to manual counts in 10 subjects demonstrated strong correlation (r=0.87, P≤0.001), but the HACC consistently missed around a quarter of the cough sounds identified by manual counting (87). As a result, it has not been adopted.
Cayetano Cough Monitor
The Cayetano Cough Monitor is a semi-automated system comprising a digital recording device and free-field microphone, recently developed by a Peru-based group studying TB (60). The device is reported to have a sensitivity of 75.5% in the ambulatory setting, with a false positive rate of 4 events/hour. The operator must review approximately 5% of the total recording time for this sensitivity to be achieved (60). Unlike the other monitoring systems, the developers chose to count coughing bouts, as described above, rather than individual cough sounds, which will make comparison with cough frequency measured by other devices difficult. This counting method was chosen because the monitor was found to have a sensitivity of only 51.4% when counting individual cough sounds. The device was used to determine cough frequency in pulmonary TB; 97 adults were enrolled in the study and made 1,642 recordings, of which 685 (42%) were excluded due to high levels of background noise, leaving 957 for analysis. The researchers are working to resolve this technical limitation by incorporating accelerometer-based technology (21).
LCM
In 2008, Birring et al. overcame previous limitations of 24-hour cough recording by developing and validating the LCM. The LCM is a small, lightweight system comprising a commercially available portable digital recording device and a free-field lapel microphone. The recorded data is subsequently analysed using an automated algorithm capable of detecting most cough sounds whilst rejecting non-cough noises (30,65). Operator input is required only for calibration, as a consistency check to improve the specificity of the device, and takes approximately 5 minutes for every 24-hour recording. Validation was initially undertaken with data collected from 15 patients with chronic cough and 8 healthy volunteers. Reported sensitivity and specificity were 91% and 99%, respectively, for identification of cough sounds, with a median false positive rate of 2.5 events/patient/hour (30). The LCM was also shown to be repeatable over >3 months, and demonstrated a marginal improvement in repeatability when compared to manually analysed recordings (30).
Further evaluation by an independent researcher from another institution compared non-automated cough counts in 24-hour recordings from 7 patients with idiopathic chronic cough to analysis by the machine. Automated and non-automated cough counts were very similar (mean ± SE: 23±7 compared to 24±6 coughs/patient/h, respectively; intra-class correlation coefficient 0.98) (88).
More recent testing of the LCM in 24-hour recordings from 20 individuals (8 healthy volunteers and 12 with chronic cough) showed a sensitivity of the system of 83.8% in patients and 82.3% in healthy volunteers, with a specificity of 99.9% in comparison to counting by ear (89).
The LCM has been used to obtain outcome measure data of cough frequency in a number of studies, including randomised controlled trials of gabapentin (90) and erythromycin (91) in chronic cough, and inhaled sodium cromoglycate in idiopathic pulmonary fibrosis (92). The system has also been used successfully for measuring cough frequency in bronchiectasis (93), sarcoidosis (94), COPD (95), and TB (22).
VitaloJAK™
The VitaloJAK™ was developed through collaboration between Vitalograph (a medical diagnostic device company) and the University Hospital of South Manchester. The system uses a combination of a lapel microphone and a contact microphone attached to the upper sternum, with a specially designed ambulatory recording device worn in a belt bag. Rather than generating cough counts by an automated process, the VitaloJAK™ software algorithm compresses audio recordings by removing all silent periods and the majority of non-cough sounds. Experienced operators then listen to the compressed recordings, each lasting approximately 1.5 hours per 24-hour monitoring period. Coughs are detected using an audio-visual display.
There have so far been two published reports on the evaluation of the software algorithm in a total of 30 individuals comprising 24 patients (with chronic cough, asthma or COPD) and 6 healthy controls (96,97). The developers report an almost zero error rate in transferring the original cough sounds to the condensed recording apart from in one patient with asthma and apparently muffled cough sounds (96). The VitaloJAK™ cough frequency detection system has not undergone separate independent evaluation.
The VitaloJAK™ has been used to detect cough counts in a number of studies in a range of diseases, including chronic cough, COPD, pulmonary fibrosis, asthma and cystic fibrosis (98-102). It has also successfully provided positive primary outcome efficacy data in Phase 2 studies of gefapixant in unexplained chronic cough (19,20).
Comparing the LCM and the VitaloJAK™
The LCM and VitaloJAK™ are the two most widely used cough monitoring systems to date. They have not been directly compared, but lead to very similar 24-hour cough counts in similar types of patients. Both systems can record data continuously for 24 hours, with the LCM capable of doing so for up to 4 days (106). The LCM requires significantly less operator time. Conversely, the VitaloJAK™, mainly due to the greater human operator input, may have greater accuracy in terms of correctly detecting all cough events, but this is difficult to quantify from published data. The VitaloJAK™ has been used in children (107) and is safety tested as a medical device, although the LCM too could probably undergo the necessary adaptation and validation process to demonstrate its use in children.
The VitaloJAK™ recording system was designed specifically for cough sounds, whereas the audio capture equipment of the LCM was developed primarily for recording speech. However, as the LCM employs a microphone with a high frequency response, and digitally records sound at a sampling rate appropriate to cough sounds, it is unclear whether the bespoke recorder offers any advantage (30). The LCM recording device is smaller and lighter, making it potentially more practical and acceptable to the wearer.
The lack of a chest wall contact microphone might make the LCM algorithm potentially more likely to overestimate cough counts if other individuals in the vicinity are also coughing. However, contact microphones have the disadvantage of being highly sensitive to noise from movement artefact, as previously discussed (41). Furthermore, due to the characteristics of the free-field microphone used, and the fact that distant-sounding coughs can be filtered out by the operator during the calibration phase of audio analysis, the accidental detection of background coughs might not be a significant problem in clinical use (108). Kulnik et al. tested the LCM on a hospital ward in which the wearer of the device and others in the background were prompted to cough. An observer remained present and counted coughs from both the study subject and others in the background in real time. Agreement between cough counts of the subjects from the LCM and the observer was extremely high (108).
The lack of direct skin contact with the LCM might make the monitor less noticeable to the wearer, potentially important when trying to determine 'usual' cough frequency. Conversely, as the LCM is easier to remove and replace, the wearer might be more tempted to do this during a recording period. Such removal might be more easily recognised with the VitaloJAK™ system, through the absence of recorded data from the contact microphone and the greater human input into the analysis of recordings.
The relatively low unit cost of the LCM recording equipment makes it more feasible for recordings in the home environment. In particular, this facilitates sending the equipment by mail at the end of a recording period, rather than delivering it in person, increasing convenience for both study subjects and researchers. The low cost of the LCM hardware has also made it well-suited to independently led collaborative non-commercial research, including in TB in East London and South Africa (22,109).
The two systems therefore have different strengths and weaknesses and should probably be seen as complementary, both having supported significant recent advances in cough research ( Table 1).
Future directions
At the present time, cough monitoring very much remains a niche research tool. No well-validated automated cough frequency system is currently freely or commercially available. Both the LCM and the VitaloJAK™ systems offer substantial advances on the only previous alternatives for cough counting, but as discussed they have limitations, and are currently only accessible through research collaboration with the developers. Given the advances of the last two decades in computer and mobile technology, in particular in speech recognition, developing a cough monitor with the ideal characteristics mentioned above should not be beyond expectation. However, there has been very little financial investment in cough counting tools to date.
Despite the ease with which the human ear recognises coughs (presumably for reasons which have been favoured by natural selection), automated cough detection remains a challenge. As previously discussed, the approaches taken to automatic speech recognition are relevant, but owing to the noise-like qualities of cough compared to speech sounds, are only part of the solution. Further research focus is needed. Algorithms should be personalised to adapt to the cough sounds of the subject under observation in order that the coughs of others in the vicinity are ignored.
Future systems should also ideally operate in real time, recognising coughs as soon as they occur and ignoring all other sounds. In this way, capturing sound information over the entirety of the recording period would become redundant. The currently used cough monitors discussed above have this requirement, creating large digital data files for analysis at a later time point. Such an advance would therefore substantially reduce the data storage and processing demands of cough monitoring.

Ongoing developments in cough counting tools, and the generation of more data, should go hand in hand with the wider application of cough frequency measurement, which in turn should lead to further advances in technology. Basic questions remain poorly answered in cough research, regarding, for example, the epidemiology of chronic cough in the general population, the normal range of cough frequency in health, and the extent to which cough counts vary from day to day within individuals in health and in stable respiratory disease. These points are key to better definitions of disease, for measuring responses to treatments, and for powering clinical trials, and should be addressed as cough frequency monitoring becomes more commonplace.
Duration of cough monitoring
The optimum duration for measuring cough frequency is not defined, but will probably relate in part to the question being asked. Clinical trials of new antitussives have recently used both 24-hour and daytime cough frequency (19,110).
It is not clear which is superior. It has been suggested that continuous monitoring for periods of only a few hours might be a sufficient surrogate for daily cough frequency (111), but more data are needed. As technology now allows, recording periods of 48 hours or longer may be preferred. This would not only allow more account to be taken of normal diurnal and inter-day variations in cough frequency, but also of any effect on cough frequency of wearing a monitoring device.
This theoretical effect has long been recognised but has not been quantified; cough monitoring may alter the subject's awareness of their cough, leading to fewer or more voluntary coughs, or to avoidance of activities that provoke coughing, such as smoking (32,112). Wearing a cough monitor for longer than 24 hours might allow the subject to become accustomed to the device and not adapt their behaviour as a result.
Novel applications
In a proof-of-concept study, Crooks et al. demonstrated declines in cough frequency during recovery from exacerbations of COPD. Serial recordings were made with the LCM over several weeks in the home environment (113). Cough monitoring could potentially be used in a similar way to detect the early phases of COPD exacerbations, leading to more prompt interventions to mitigate deterioration. This approach is also being investigated in asthma (61,114), and could combine well with telemedicine for remote monitoring by health teams (115).
Cough frequency measurement could be a novel objective marker of disease severity, and a means of directly monitoring response to treatment. This has been investigated in TB (21,116), a disease in which the currently used objective markers of treatment response, including weight gain, radiographic improvement, and sterilisation of sputum microscopy, are limited in responsiveness, sensitivity and specificity (117,118). Cough monitoring could also impact on the management of interstitial lung diseases, which are also in need of better clinical markers of severity and treatment response (119).
Cough frequency was investigated in the 1960s as a marker of infectiousness in TB (120), but only recently has this been shown to be feasible and potentially valuable using modern technology (22). Improved identification of the most infectious individuals would have clear advantages for the control of disease transmission (121). Screening for disease in apparently healthy individuals, particularly in its early stages, has attracted a lot of attention, particularly in lung cancer (122,123) and TB (124), and also has clear applications for the control of other respiratory infections (125). Cough counting tools could lend themselves well to screening, cough being a common and early feature of respiratory conditions. Cough monitoring would clearly first have to become much simpler and more widespread.
Mobile device technology
In early 2020 approximately 45% of the world's population owned a smartphone, rising to more than 90% in some parts of the world (126). Such devices commonly incorporate microphones, software and processors of sufficient specification to support accurate speech recognition systems. The potential adaptation of mobile devices as cough monitors is therefore very attractive.
One such smartphone application is currently in development; it records, encrypts and transmits data to a remote, secure cloud server for automated analysis. This system awaits proper validation (127), and others are in development (71). The continuous transmission of data for remote analysis would circumvent the need to embed further signal processing capability for real-time cough detection within mobile phones themselves. However, the computational cost of running complex processing and classification algorithms, and the challenge of a low signal-to-noise ratio for real-time cough detection, could potentially be overcome with advances and adaptations in machine learning algorithms as discussed (71).
Smart electronic speakers are rapidly becoming more common in the home environment, and could also potentially be adapted to monitor cough (128). The adaptation of commonly owned electronic devices to monitor health is already proven, for example in screening for atrial fibrillation with a wristwatch (129), and should make cough counting technology more widely available, practical and acceptable to individuals.
Conclusions
We predict that the recent increase in interest in the field of cough, accompanied by significant potential breakthroughs in new antitussive treatments and rapid advances in mobile technologies and signal processing, will lead to improvements in, and much wider use of, cough counting tools. Machines may ultimately surpass the human ear's ability to detect cough, perhaps removing any operator dependence of the systems.
With the increasing automation of modern life and reliance on technology, such devices should appeal not only to clinicians but also to patients, allowing them greater facility to monitor and manage their own condition. For healthcare services, they may have the added benefit of providing a remote method for tracking patient cohorts, and for screening for disease.
Footnote
Provenance and Peer Review: This article was commissioned by the Guest Editor (Kefang Lai) for the series "3rd International Cough Conference" published in Journal of Thoracic Disease. The article was sent for external peer review.
Conflicts of Interest: All authors have completed the ICMJE uniform disclosure form (available at http://dx.doi.org/10.21037/jtd-2020-icc-003). The series "3rd International Cough Conference" was commissioned by the editorial office without any funding or sponsorship. Dr. SB reports other support from Avalyn and from Patara, outside the submitted work; Dr. SB is the developer of the LCM. The other authors have no other conflicts of interest to declare.
Ethical Statement: The authors are accountable for all aspects of the work in ensuring that questions related to the accuracy or integrity of any part of the work are appropriately investigated and resolved.
Open Access Statement: This is an Open Access article distributed in accordance with the Creative Commons Attribution-NonCommercial-NoDerivs 4.0 International License (CC BY-NC-ND 4.0), which permits the non-commercial replication and distribution of the article with the strict proviso that no changes or edits are made and the original work is properly cited (including links to both the formal publication through the relevant DOI and the license). See: https://creativecommons.org/licenses/by-nc-nd/4.0/.
import java.awt.*;
import java.awt.event.*;
import javax.swing.*;
public class ChatClientSwing implements ActionListener {
private JFrame chatFrame;
private JPanel chatPanel;
private JTextField inputField;
private JList<String> list;
private DefaultListModel<String> listModel;
private JTextArea textArea;
private MySocket sc;
public DefaultListModel<String> getListModel() {
return listModel;
}
public MySocket getSocket() {
return sc;
}
public JTextArea getTextArea() {
return textArea;
}
public ChatClientSwing(MySocket socket) {
sc = socket;
// Schedule creation and show of GUI
javax.swing.SwingUtilities.invokeLater(new Runnable() {
public void run() {
try {
createAndShowGUI();
} catch(Exception e){
e.printStackTrace();
}
}
});
// Output thread
new ClientOutputThreadSwing(this).start();
}
private void setOnInputField(final String message) {
javax.swing.SwingUtilities.invokeLater(new Runnable() {
public void run() {
try {
inputField.setText(message);
} catch(Exception e){
e.printStackTrace();
}
}
});
}
public void actionPerformed(ActionEvent event) {
// Gets the text from inputField (clearing it afterwards), adds the
// message to the text area and writes it to the socket to send it
// to the server
String message = inputField.getText();
setOnInputField("");
try {
sc.println(message);
} catch(Exception e){
e.printStackTrace();
}
}
private void createAndShowGUI() throws Exception {
//Set the look and feel.
UIManager.setLookAndFeel(UIManager.getSystemLookAndFeelClassName());
//Make sure we have nice window decorations.
JFrame.setDefaultLookAndFeelDecorated(true);
// Create and set up the window.
JFrame frame = new JFrame("Xat");
frame.setLayout(new BorderLayout(5,5));
frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
// Create an output JPanel and add a JTextArea(20, 30) inside a JScrollPane
JPanel out = new JPanel();
out.setLayout(new BoxLayout(out,BoxLayout.LINE_AXIS));
out.setBorder(BorderFactory.createEmptyBorder(5,5,0,5));
textArea = new JTextArea(20,30);
textArea.setEditable(false);
out.add(new JScrollPane(textArea));
listModel = new DefaultListModel<>();
list = new JList<>(listModel);
JScrollPane listScrollPane = new JScrollPane(list);
out.add(listScrollPane, BorderLayout.CENTER);
// Create an input JPanel and add a JTextField(25) and a JButton
JPanel inp = new JPanel();
inp.setLayout(new BoxLayout(inp,BoxLayout.LINE_AXIS));
inputField = new JTextField();
JButton button = new JButton("Send");
inp.add(inputField);
inp.add(button);
// Listen to events from both the input field (Enter key) and the Send button.
inputField.addActionListener(this);
button.addActionListener(this);
// add panels to main frame
frame.add(out, BorderLayout.CENTER);
frame.add(inp, BorderLayout.PAGE_END);
//Display the window centered.
frame.setSize(500,400);
frame.setLocationRelativeTo(null);
frame.setVisible(true);
}
public static void main(String[] args) throws Exception {
new ChatClientSwing(new MySocket(args[0], Integer.parseInt(args[1])));
}
}
#ifndef DNest4_Data
#define DNest4_Data
#include <vector>
#include <algorithm>
#include <cmath>
class Data
{
private:
std::vector<double> t, y, sig;
public:
Data();
//void load(const char* filename);
void load(const char* filename, const char* units, int skip=2);
int index_fibers;
const char* datafile;
const char* dataunits;
int dataskip;
// Getters
int N() const {return t.size();}
const std::vector<double>& get_t() const { return t; }
double get_t_min() const { return *std::min_element(t.begin(), t.end()); }
double get_t_max() const { return *std::max_element(t.begin(), t.end()); }
double get_t_middle() const { return get_t_min() + 0.5*(get_t_max() - get_t_min()); }
const std::vector<double>& get_y() const { return y; }
double get_y_min() const { return *std::min_element(y.begin(), y.end()); }
double get_y_max() const { return *std::max_element(y.begin(), y.end()); }
double get_RV_span() const { return get_y_max() - get_y_min(); }
double get_RV_var() const;
double get_RV_std() const { return std::sqrt(get_RV_var()); }
const std::vector<double>& get_sig() const { return sig; }
double topslope() const {return std::abs(get_y_max() - get_y_min()) / (t.back() - t.front());}
// Singleton
private:
static Data instance;
public:
static Data& get_instance() { return instance; }
};
#endif
package handlers_test
import (
"encoding/json"
"net/http"
"net/http/httptest"
"testing"
"github.com/ml-tv/tv-api/src/components/users/handlers"
"github.com/ml-tv/tv-api/src/components/users/payloads"
"github.com/ml-tv/tv-api/src/components/users/routes"
"github.com/ml-tv/tv-api/src/core/network/http/httptests"
"github.com/ml-tv/tv-api/src/core/primitives/models/lifecycle"
"github.com/ml-tv/tv-api/src/core/security/auth/testdata"
"github.com/stretchr/testify/assert"
)
func callGetUser(t *testing.T, params *handlers.GetUserParams, auth *httptests.RequestAuth) *httptest.ResponseRecorder {
ri := &httptests.RequestInfo{
Endpoint: routes.UserEndpoints[routes.EndpointGetUser],
Params: params,
Auth: auth,
}
return httptests.NewRequest(t, ri)
}
func TestGetUser(t *testing.T) {
defer lifecycle.PurgeModels(t)
u1, s1 := testdata.NewAuth(t)
u2, s2 := testdata.NewAuth(t)
tests := []struct {
description string
code int
params *handlers.GetUserParams
auth *httptests.RequestAuth
}{
{
"Not logged",
http.StatusOK,
&handlers.GetUserParams{ID: u1.ID},
nil,
},
{
"Getting an other user",
http.StatusOK,
&handlers.GetUserParams{ID: u1.ID},
httptests.NewRequestAuth(s2.ID, u2.ID),
},
{
"Getting own data",
http.StatusOK,
&handlers.GetUserParams{ID: u1.ID},
httptests.NewRequestAuth(s1.ID, u1.ID),
},
{
"Getting un-existing user with valid ID",
http.StatusNotFound,
&handlers.GetUserParams{ID: "f76700e7-988c-4ae9-9f02-ac3f9d7cd88e"},
nil,
},
{
"Getting un-existing user with invalid ID",
http.StatusBadRequest,
&handlers.GetUserParams{ID: "invalidID"},
nil,
},
}
for _, tc := range tests {
t.Run(tc.description, func(t *testing.T) {
rec := callGetUser(t, tc.params, tc.auth)
assert.Equal(t, tc.code, rec.Code)
if httptests.Is2XX(rec.Code) {
var u payloads.User
if err := json.NewDecoder(rec.Body).Decode(&u); err != nil {
t.Fatal(err)
}
if assert.Equal(t, tc.params.ID, u.ID, "Not the same user") {
// User access their own data
if tc.auth != nil && u.ID == tc.auth.UserID {
assert.NotEmpty(t, u.Email, "Same user needs their private data")
} else { // user access an other user data
assert.Empty(t, u.Email, "Should not return private data")
}
}
}
})
}
}
import numpy as np
from continual_rl.experiments.environment_runners.environment_runner_sync import EnvironmentRunnerSync
from continual_rl.experiments.tasks.task_base import TaskSpec
from tests.common_mocks.mock_policy.mock_policy import MockPolicy
from tests.common_mocks.mock_policy.mock_policy_config import MockPolicyConfig
from tests.common_mocks.mock_policy.mock_timestep_data import MockTimestepData
from tests.common_mocks.mock_preprocessor import MockPreprocessor
class MockEnv(object):
def __init__(self):
self.actions_executed = []
self.reset_count = 0
self.observation_space = [1, 2, 3]
self.action_space = [4, 5]
def seed(self, seed):
self.seed_set = seed
def reset(self):
self.reset_count += 1
return np.array([0, 1, 2])
def step(self, action):
self.actions_executed.append(action)
observation = np.array([12, 13, 14])
reward = 1.5
done = action == 4 # Simple way to force the done state we want
return observation, reward, done, {"info": "unused"}
def close(self):
pass
class TestEnvironmentRunnerSync(object):
def test_collect_data_simple_success(self, monkeypatch):
"""
Setup the simple happy-path for collect_data, and make sure things are being populated correctly.
Simple: no done=True, no rewards returned, etc.
"""
# Arrange
def mock_compute_action(_, observation, task_id, action_space_id, last_timestep_data, eval_mode):
action = [3]
timestep_data = MockTimestepData(data_to_store=(observation, task_id, action_space_id, eval_mode))
if last_timestep_data is None:
timestep_data.memory = 0
else:
timestep_data.memory = last_timestep_data.memory + 1
return action, timestep_data
# Mock the policy we're running; action_space and observation_space not used.
mock_policy = MockPolicy(MockPolicyConfig(), action_spaces=None, observation_space=None)
monkeypatch.setattr(MockPolicy, "compute_action", mock_compute_action)
# The object under test
runner = EnvironmentRunnerSync(policy=mock_policy, timesteps_per_collection=123)
# Arguments to collect_data
# Normally should create a new one each time, but doing this for spying
mock_env = MockEnv()
mock_env_spec = lambda: mock_env
# MockEnv is used for determining that parameters are getting generated and passed correctly
task_spec = TaskSpec(task_id=5, action_space_id=3, preprocessor=MockPreprocessor(), env_spec=mock_env_spec,
num_timesteps=9718, eval_mode=1964)
# Act
timesteps, collected_data, rewards_reported, _ = runner.collect_data(task_spec)
# Assert
# Basic return checks
assert timesteps == 123, f"Number of timesteps returned inaccurate. Got {timesteps}."
assert len(collected_data) == 1, f"Amount of collected data unexpected. Got {len(collected_data)}."
assert len(collected_data[0]) == 123, f"Amount of collected data unexpected. Got {len(collected_data[0])}."
assert len(rewards_reported) == 0, "Rewards were reported when none were expected."
# Check that MockTimestepData is getting properly updated
collected_data = collected_data[0]
assert isinstance(collected_data[0], MockTimestepData), "Unexpected TimestepData returned."
assert np.all(np.array([entry.reward for entry in collected_data]) == 1.5), \
"MockTimestepData not correctly populated with reward."
assert not np.any(np.array([entry.done for entry in collected_data])), \
"MockTimestepData not correctly populated with done."
assert collected_data[0].memory == 0, "compute_action not correctly receiving last_timestep_data."
assert collected_data[1].memory == 1, "compute_action not correctly receiving last_timestep_data."
assert collected_data[78].memory == 78, "compute_action not correctly receiving last_timestep_data."
# Check that the observation is being created correctly
observation_to_policy, received_task_id, received_action_space_id, observed_eval_mode = collected_data[0].data_to_store
assert received_task_id == 5, "task_id getting intercepted somehow."
assert received_action_space_id == 3, "action_space_id getting intercepted somehow."
assert observation_to_policy.shape[0] == 1, "'Fake' batch missing"
assert observed_eval_mode == 1964, "Eval_mode not passed correctly"
# 3 is from how MockEnv is written, which returns observations of length 3
assert observation_to_policy.shape[1] == 3, "Incorrect obs shape"
# Use our environment spy to check it's being called correctly
assert mock_env.reset_count == 1, f"Mock env reset an incorrect number of times: {mock_env.reset_count}"
assert len(mock_env.actions_executed) == 123, "Mock env.step not called a sufficient number of times"
assert np.all(np.array(mock_env.actions_executed) == 3), "Incorrect action taken"
assert mock_env.seed_set is not None, "Seed not being set"
def test_collect_data_with_intermediate_dones(self, monkeypatch):
"""
Setup an environment that gives "done" at some point during the run
"""
# Arrange
current_step = 0
def mock_compute_action(_, observation, task_id, action_space_id, last_timestep_data, eval_mode):
nonlocal current_step
action = [4] if current_step == 73 else [3] # 4 is the "done" action, 3 is arbitrary
current_step += 1
timestep_data = MockTimestepData(data_to_store=(observation, task_id, action_space_id, eval_mode))
if last_timestep_data is None:
timestep_data.memory = 0
else:
timestep_data.memory = last_timestep_data.memory + 1
return action, timestep_data
# Mock the policy we're running. action_space and observation_space not used.
mock_policy = MockPolicy(MockPolicyConfig(), action_spaces=None, observation_space=None)
monkeypatch.setattr(MockPolicy, "compute_action", mock_compute_action)
# The object under test
runner = EnvironmentRunnerSync(policy=mock_policy, timesteps_per_collection=123)
# Arguments to collect_data
# Normally should create a new one each time, but doing this for spying
mock_env = MockEnv()
mock_env_spec = lambda: mock_env
# MockEnv is used for determining that parameters are getting generated and passed correctly
task_spec = TaskSpec(task_id=9, action_space_id=6, preprocessor=MockPreprocessor(), env_spec=mock_env_spec,
num_timesteps=9718, eval_mode=1964)
# Act
timesteps, collected_data, rewards_reported, _ = runner.collect_data(task_spec)
# Assert
# Basic return checks
assert timesteps == 123, f"Number of timesteps returned inaccurate. Got {timesteps}."
assert len(collected_data) == 1, f"Amount of collected data unexpected. Got {len(collected_data)}."
assert len(collected_data[0]) == 123, f"Amount of collected data unexpected. Got {len(collected_data[0])}."
assert len(rewards_reported) == 1, "Rewards were not reported when one was expected."
assert rewards_reported[0] == 74 * 1.5, f"Value of reward reported unexpected {rewards_reported}"
# Check that MockTimestepData is getting properly updated
collected_data = collected_data[0]
assert isinstance(collected_data[0], MockTimestepData), "Unexpected TimestepData returned."
assert not np.any(np.array([entry.done for entry in collected_data[:73]])), \
"MockTimestepData not correctly populated with done."
assert not np.any(np.array([entry.done for entry in collected_data[74:]])), \
"MockTimestepData not correctly populated with done."
assert collected_data[73].done, "MockTimestepData not correctly populated with done."
assert collected_data[78].memory == 78, "compute_action not correctly receiving last_timestep_data. " \
"(Always populated, even if a done occurred.)"
# Check that the observation is being created correctly
observation_to_policy, received_task_id, received_action_space_id, observed_eval_mode = collected_data[0].data_to_store
assert received_task_id == 9, "task_id getting intercepted somehow."
assert received_action_space_id == 6, "action_space_id getting intercepted somehow."
assert observation_to_policy.shape[0] == 1, "'Fake' batch missing"
assert observed_eval_mode == 1964, "Eval_mode not passed correctly"
# 3 is from how MockEnv is written, which returns observations of length 3
assert observation_to_policy.shape[1] == 3, "Incorrect obs shape"
# Use our environment spy to check it's being called correctly
assert mock_env.reset_count == 2, f"Mock env reset an incorrect number of times: {mock_env.reset_count}"
assert len(mock_env.actions_executed) == 123, "Mock env.step not called a sufficient number of times"
assert np.all(np.array(mock_env.actions_executed[:73]) == 3), "Incorrect action taken, first half"
assert np.all(np.array(mock_env.actions_executed[74:]) == 3), "Incorrect action taken, second half"
assert np.array(mock_env.actions_executed)[73] == 4, "Incorrect action taken at the 'done' step."
def test_collect_data_multi_collect_before_done(self, monkeypatch):
"""
Run two data collections, and verify that a "done" occurring partway through the second collection is handled correctly.
"""
# Arrange
# Mock methods
current_step = 0
def mock_compute_action(_, observation, task_id, action_space_id, last_timestep_data, eval_mode):
nonlocal current_step
action = [4] if current_step == 73 else [3] # 4 is the "done" action, 3 is arbitrary
current_step += 1
timestep_data = MockTimestepData(data_to_store=(observation, action_space_id, eval_mode))
if last_timestep_data is None:
timestep_data.memory = 0
else:
timestep_data.memory = last_timestep_data.memory + 1
return action, timestep_data
# Mock the policy we're running. action_space and observation_space not used.
mock_policy = MockPolicy(MockPolicyConfig(), action_spaces=None, observation_space=None)
monkeypatch.setattr(MockPolicy, "compute_action", mock_compute_action)
# The object under test
runner = EnvironmentRunnerSync(policy=mock_policy, timesteps_per_collection=50)
# Arguments to collect_data
# Normally should create a new one each time, but doing this for spying
mock_env = MockEnv()
mock_env_spec = lambda: mock_env
# MockEnv is used for determining that parameters are getting generated and passed correctly
task_spec = TaskSpec(task_id=13, action_space_id=6, preprocessor=MockPreprocessor(), env_spec=mock_env_spec,
num_timesteps=9718, eval_mode=1964)
# Act
timesteps_0, collected_data_0, rewards_reported_0, _ = runner.collect_data(task_spec)
timesteps_1, collected_data_1, rewards_reported_1, _ = runner.collect_data(task_spec)
# Assert
# Basic return checks
assert timesteps_0 == timesteps_1 == 50, f"Number of timesteps returned inaccurate. " \
f"Got {(timesteps_0, timesteps_1)}."
assert len(collected_data_0[0]) == len(collected_data_1[0]) == 50, f"Amount of collected data unexpected. " \
f"Got {(len(collected_data_0), len(collected_data_1))}."
assert len(rewards_reported_0) == 0, "Rewards were reported when none were expected."
assert len(rewards_reported_1) == 1, "Rewards were not reported when one was expected."
assert rewards_reported_1[0] == 74 * 1.5, f"Value of reward reported unexpected {rewards_reported_1}"
# Check that MockTimestepData is getting properly updated
collected_data_0 = collected_data_0[0]
collected_data_1 = collected_data_1[0]
assert not np.any(np.array([entry.done for entry in collected_data_0])), \
"MockTimestepData not correctly populated with done."
assert not np.any(np.array([entry.done for entry in collected_data_1[:23]])), \
"MockTimestepData not correctly populated with done."
assert not np.any(np.array([entry.done for entry in collected_data_1[24:]])), \
"MockTimestepData not correctly populated with done."
assert collected_data_1[23].done, "MockTimestepData not correctly populated with done."
assert collected_data_1[45].memory == 95, "compute_action not correctly receiving last_timestep_data."
# Use our environment spy to check it's being called correctly
assert mock_env.reset_count == 2, f"Mock env reset an incorrect number of times: {mock_env.reset_count}"
assert len(mock_env.actions_executed) == 100, "Mock env.step not called a sufficient number of times"
assert np.all(np.array(mock_env.actions_executed[:73]) == 3), "Incorrect action taken, first half"
assert np.all(np.array(mock_env.actions_executed[74:]) == 3), "Incorrect action taken, second half"
assert np.array(mock_env.actions_executed)[73] == 4, "Incorrect action taken at the 'done' step."
|
// Decodes four symbols in parallel using the given tables.
static inline uint32_t RansSimdDecSym(RansSimdDec* r, RansWordTables const* tab)
{
__m128i freq_bias_lo, freq_bias_hi, freq_bias;
__m128i freq, bias;
__m128i xscaled;
__m128i x = r->simd;
__m128i slots = _mm_and_si128(x, _mm_set1_epi32(RANS_WORD_M - 1));
uint32_t i0 = (uint32_t) _mm_cvtsi128_si32(slots);
uint32_t i1 = (uint32_t) _mm_extract_epi32(slots, 1);
uint32_t i2 = (uint32_t) _mm_extract_epi32(slots, 2);
uint32_t i3 = (uint32_t) _mm_extract_epi32(slots, 3);
uint32_t s = tab->slot2sym[i0] | (tab->slot2sym[i1] << 8) | (tab->slot2sym[i2] << 16) | (tab->slot2sym[i3] << 24);
freq_bias_lo = _mm_cvtsi32_si128(tab->slots[i0].u32);
freq_bias_lo = _mm_insert_epi32(freq_bias_lo, tab->slots[i1].u32, 1);
freq_bias_hi = _mm_cvtsi32_si128(tab->slots[i2].u32);
freq_bias_hi = _mm_insert_epi32(freq_bias_hi, tab->slots[i3].u32, 1);
freq_bias = _mm_unpacklo_epi64(freq_bias_lo, freq_bias_hi);
xscaled = _mm_srli_epi32(x, RANS_WORD_SCALE_BITS);
freq = _mm_and_si128(freq_bias, _mm_set1_epi32(0xffff));
bias = _mm_srli_epi32(freq_bias, 16);
r->simd = _mm_add_epi32(_mm_mullo_epi32(xscaled, freq), bias);
return s;
} |
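To make the vector routine above easier to follow, here is a scalar sketch of what each SIMD lane computes. It is written as illustrative Java rather than as part of the original C file, and the constant values and table layout (16-bit freq in the low half of each slot entry, 16-bit bias in the high half) are assumptions inferred from the snippet, not verified against the full source.

// Illustrative scalar equivalent of one lane of RansSimdDecSym (assumed constants).
static final int RANS_WORD_SCALE_BITS = 16;               // assumption
static final int RANS_WORD_M = 1 << RANS_WORD_SCALE_BITS; // assumption

// Decodes one symbol from a single 32-bit rANS state held in state[0].
static int decodeOneLane(long[] state, int[] slot2sym, int[] slotFreq, int[] slotBias) {
    long x = state[0] & 0xFFFFFFFFL;
    int slot = (int) (x & (RANS_WORD_M - 1));    // low bits select the slot
    int sym = slot2sym[slot];                    // slot -> symbol lookup
    // State update, mod 2^32: x = freq * (x >> SCALE_BITS) + bias
    long next = ((long) slotFreq[slot] * (x >>> RANS_WORD_SCALE_BITS) + slotBias[slot]) & 0xFFFFFFFFL;
    state[0] = next;
    return sym;
}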
#
# License: BSD
# https://raw.githubusercontent.com/stonier/groot_rocker_extensions/devel/LICENSE
#
##############################################################################
# Documentation
##############################################################################
"""
This is the top-level namespace of the groot_rocker_extensions package.
"""
##############################################################################
# Imports
##############################################################################
from . import bind # noqa
from . import colcon # noqa
from . import development_environment # noqa
from . import git # noqa
from . import main # noqa
from . import named_prompt # noqa
from . import nvidia # noqa
from . import snorriheim # noqa
from . import ssh # noqa
from . import pulse_audio # noqa
from . import user # noqa
from . import work_directory # noqa
|
// test/com/tvl/util/TestExtensionMethods.java
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
package com.tvl.util;
import java.util.Map;
import org.junit.Assert;
final class TestExtensionMethods {
private static final double GOLDEN_RATIO = (1 + Math.sqrt(5)) / 2;
static <K, V> Map<K, V> toReadOnlyMap(ImmutableMap<K, V> map) {
Requires.notNull(map, "map");
return toBuilder(map);
}
static <K, V> Map<K, V> toBuilder(ImmutableMap<K, V> map) {
Requires.notNull(map, "map");
if (map instanceof ImmutableHashMap<?, ?>) {
return ((ImmutableHashMap<K, V>)map).toBuilder();
}
//if (map instanceof ImmutableTreeMap<K, V>) {
// return ((ImmutableTreeMap<K, V>)map).toBuilder();
//}
throw new UnsupportedOperationException();
}
/**
* Verifies that a binary tree is balanced according to AVL rules.
*
* @param node The root node of the binary tree.
*/
static void verifyBalanced(BinaryTree<?> node) {
if (node.getLeft() != null) {
verifyBalanced(node.getLeft());
}
if (node.getRight() != null) {
verifyBalanced(node.getRight());
}
if (node.getRight() != null && node.getLeft() != null) {
assertInRange(node.getLeft().getHeight() - node.getRight().getHeight(), -1, 1);
} else if (node.getRight() != null) {
assertInRange(node.getRight().getHeight(), 0, 1);
} else if (node.getLeft() != null) {
assertInRange(node.getLeft().getHeight(), 0, 1);
}
}
private static void assertInRange(int value, int minimum, int maximumInclusive) {
Assert.assertTrue(value >= minimum);
Assert.assertTrue(value <= maximumInclusive);
}
static void verifyHeightIsWithinTolerance(BinaryTree<?> node) {
verifyHeightIsWithinTolerance(node, null);
}
/**
* Verifies that a binary tree is no taller than necessary to store the data if it were optimally balanced.
*
* @param node The root node.
* @param count The number of nodes in the tree. May be {@code null} if {@link BinaryTree#size()} is functional.
*/
static void verifyHeightIsWithinTolerance(BinaryTree<?> node, Integer count) {
// http://en.wikipedia.org/wiki/AVL_tree
double heightMustBeLessThan = log(2, GOLDEN_RATIO) * log(Math.sqrt(5) * ((count != null ? count : node.size()) + 2), 2) - 2;
Assert.assertTrue(node.getHeight() < heightMustBeLessThan);
}
private static double log(double value, double base) {
return Math.log(value) / Math.log(base);
}
private TestExtensionMethods() {
}
}
|
/** Changes the type of a value by calling {@link Value#as(DataType)}. */
public class ExplicitCast extends Expression implements ValueHolder {
private final DataType targetType;
private final ValueHolder target;
/**
* Creates an {@link ExplicitCast}.
*
* @param targetType is the type that the target should be casted to.
* @param target is the {@link Value} that gets casted.
*/
public ExplicitCast(int lineID, DataType targetType, ValueHolder target) {
super(lineID, MERGED);
this.targetType = targetType;
this.target = target;
if (targetType == null || target == null)
			throw new AssertionError("Target type and value holder cannot be null.");
}
/**
* Returns the value of {@link #target}, casted to the {@link #targetType}.
*
 * @throws CastingException if the cast isn't supported.
*/
@Override
public Value getValue() throws CastingException {
return target.as(targetType);
}
} |
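For orientation, a minimal usage sketch follows. The holder class (IntValue), the DataType constant, and the line id are hypothetical placeholders, since the surrounding expression framework is not shown here.

// Hypothetical usage sketch for ExplicitCast (IntValue, DataType.TEXT and the
// line id 7 are illustrative placeholders, not part of the class above).
ValueHolder number = new IntValue(42);                    // some existing holder
ExplicitCast cast = new ExplicitCast(7, DataType.TEXT, number);
try {
    Value asText = cast.getValue();                       // delegates to number.as(DataType.TEXT)
    // ... use asText ...
} catch (CastingException e) {
    // raised when the underlying Value does not support the requested cast
}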
Antiviral Therapy Reduces Risk of Cirrhosis in Noncirrhotic HBV Patients Among 4 Urban Safety-Net Health Systems.
INTRODUCTION
To evaluate the impact of chronic hepatitis B virus infection (CHB) treatment on risk of cirrhosis, liver-related outcomes, and death among a diverse CHB cohort with a large proportion of African Americans.
METHODS
Adults with noncirrhotic CHB without human immunodeficiency virus from 2010 to 2018 were retrospectively evaluated across 4 US safety-net health systems. CHB was identified with International Classification of Diseases, Ninth Revision/Tenth Revision diagnosis coding and confirmatory laboratory data. Propensity-score matching, Kaplan-Meier methods, and adjusted Cox proportional hazards models were used to evaluate impact of CHB treatment on risk of cirrhosis, hepatocellular carcinoma (HCC), death, and composite of cirrhosis, HCC, or death.
RESULTS
Among 4,064 CHB patients (51.9% female, 42.0% age <45 years, 31.6% African American, 26.6% Asian, 26.7% Hispanic), 23.2% received CHB antiviral therapy and 76.8% did not. Among the propensity score-matched cohort (428 treated and 428 untreated), CHB treatment was associated with lower risk of cirrhosis (hazards ratio 0.65, 95% confidence interval 0.46-0.92, P = 0.015) and composite of cirrhosis, HCC, or death (hazards ratio 0.67, 95% confidence interval 0.49-0.94, P = 0.023). Females vs males and African Americans vs non-Hispanic whites had significantly lower risk of cirrhosis. When treatment effects were stratified by age, sex, and ethnicity, the benefits of antiviral therapies in reducing risk of cirrhosis were seen primarily in CHB patients who were females, age <45 years, and of Asian ethnicity.
DISCUSSION
Our propensity score-matched cohort of noncirrhotic CHB patients demonstrated significant reductions in risk of cirrhosis due to CHB treatment. |
/**
* Base implementation of Kinesis Input Operator. Fetches records from kinesis and emits them as tuples.<br/>
* <p>
* <b>Partition Strategy:</b>
* <p><b>1. ONE_TO_ONE partition</b> Each operator partition will consume from only one Kinesis shard </p>
 * <p><b>2. MANY_TO_ONE partition</b> Each operator partition will consume from more than one kinesis
 * shard. Dynamic partitioning is enabled by setting the {@link #shardsPerPartition} value > 1</p>
* <p/>
* Configurations:<br/>
* {@link #accessKey} : AWS Credentials AccessKeyId <br/>
* {@link #secretKey} : AWS Credentials SecretAccessKey <br/>
 * streamName : Name of the stream from which the records are to be accessed
*
* @param <T>
* @since 2.0.0
*/
@SuppressWarnings("rawtypes")
public abstract class AbstractKinesisInputOperator <T> implements InputOperator, ActivationListener<OperatorContext>, Partitioner<AbstractKinesisInputOperator>, StatsListener,Operator.CheckpointNotificationListener
{
private static final Logger logger = LoggerFactory.getLogger(AbstractKinesisInputOperator.class);
@Min(1)
private int maxTuplesPerWindow = Integer.MAX_VALUE;
private int emitCount = 0;
@NotNull
private String accessKey;
@NotNull
private String secretKey;
private String endPoint;
protected WindowDataManager windowDataManager;
protected transient long currentWindowId;
protected transient int operatorId;
protected final transient Map<String, KinesisPair<String, Integer>> currentWindowRecoveryState;
@Valid
protected KinesisConsumer consumer = new KinesisConsumer();
// By default the partition policy is 1:1
public PartitionStrategy strategy = PartitionStrategy.ONE_TO_ONE;
private transient OperatorContext context = null;
// Store the current partition info
private transient Set<PartitionInfo> currentPartitionInfo = new HashSet<PartitionInfo>();
protected transient Map<String, String> shardPosition = new HashMap<String, String>();
private ShardManager shardManager = null;
// Minimal interval between 2 (re)partition actions
private long repartitionInterval = 30000L;
// Minimal interval between checking collected stats and decide whether it needs to repartition or not.
private long repartitionCheckInterval = 5000L;
private transient long lastCheckTime = 0L;
private transient long lastRepartitionTime = 0L;
private transient boolean isReplayState = false;
//No of shards per partition in dynamic MANY_TO_ONE strategy
// If the value is more than 1, then it enables the dynamic partitioning
@Min(1)
private Integer shardsPerPartition = 1;
@Min(1)
private int initialPartitionCount = 1;
private transient List<String> newWaitingPartition = new LinkedList<String>();
/**
* This output port emits tuples extracted from Kinesis data records.
*/
public final transient DefaultOutputPort<T> outputPort = new DefaultOutputPort<T>();
public AbstractKinesisInputOperator()
{
windowDataManager = new FSWindowDataManager();
currentWindowRecoveryState = new HashMap<String, KinesisPair<String, Integer>>();
}
/**
* Derived class has to implement this method, so that it knows what type of message it is going to send to Malhar.
* It converts a ByteBuffer message into a Tuple. A Tuple can be of any type (derived from Java Object) that
 * the operator user intends.
*
* @param rc Record to convert into tuple
*/
public abstract T getTuple(Record rc);
@Override
public void partitioned(Map<Integer, Partition<AbstractKinesisInputOperator>> partitions)
{
// update the last repartition time
lastRepartitionTime = System.currentTimeMillis();
}
@Override
public Collection<Partition<AbstractKinesisInputOperator>> definePartitions(Collection<Partition<AbstractKinesisInputOperator>> partitions, PartitioningContext context)
{
boolean isInitialParitition = partitions.iterator().next().getStats() == null;
// Set the credentials to get the list of shards
if(isInitialParitition) {
try {
KinesisUtil.getInstance().createKinesisClient(accessKey, secretKey, endPoint);
} catch (Exception e) {
throw new RuntimeException("[definePartitions]: Unable to load credentials. ", e);
}
}
List<Shard> shards = KinesisUtil.getInstance().getShardList(getStreamName());
// Operator partitions
List<Partition<AbstractKinesisInputOperator>> newPartitions = null;
Set<Integer> deletedOperators = Sets.newHashSet();
// initialize the shard positions
Map<String, String> initShardPos = null;
if(isInitialParitition && shardManager !=null){
initShardPos = shardManager.loadInitialShardPositions();
}
switch (strategy) {
      // For the 1-to-1 mapping, the framework creates one operator partition per kinesis shard
// Each operator partition will consume from only one kinesis shard
case ONE_TO_ONE:
if (isInitialParitition) {
lastRepartitionTime = System.currentTimeMillis();
logger.info("[ONE_TO_ONE]: Initializing partition(s)");
// initialize the number of operator partitions according to number of shards
newPartitions = new ArrayList<Partition<AbstractKinesisInputOperator>>(shards.size());
for (int i = 0; i < shards.size(); i++) {
logger.info("[ONE_TO_ONE]: Create operator partition for kinesis partition: " + shards.get(i).getShardId() + ", StreamName: " + this.getConsumer().streamName);
newPartitions.add(createPartition(Sets.newHashSet(shards.get(i).getShardId()), initShardPos));
}
} else if (newWaitingPartition.size() != 0) {
// Remove the partitions for the closed shards
removePartitionsForClosedShards(partitions, deletedOperators);
// add partition for new kinesis shard
for (String pid : newWaitingPartition) {
logger.info("[ONE_TO_ONE]: Add operator partition for kinesis partition " + pid);
partitions.add(createPartition(Sets.newHashSet(pid), null));
}
newWaitingPartition.clear();
List<WindowDataManager> managers = windowDataManager.partition(partitions.size(), deletedOperators);
int i = 0;
for (Partition<AbstractKinesisInputOperator> partition : partitions) {
partition.getPartitionedInstance().setWindowDataManager(managers.get(i));
i++;
}
return partitions;
}
break;
      // For the N-to-1 mapping, the initial partition count is defined by the stream application
      // Afterwards, the framework will dynamically adjust the partitions
case MANY_TO_ONE:
      /* This case is handled in two ways.
       1. Dynamic Partition: The number of DT partitions depends on the number of open shards.
       2. Static Partition: The number of DT partitions is fixed, whether the number of shards increases or decreases.
*/
int size = initialPartitionCount;
if (newWaitingPartition.size() != 0) {
// Get the list of open shards
shards = getOpenShards(partitions);
if (shardsPerPartition > 1)
size = (int)Math.ceil(shards.size() / (shardsPerPartition * 1.0));
initShardPos = shardManager.loadInitialShardPositions();
}
@SuppressWarnings("unchecked")
Set<String>[] pIds = (Set<String>[]) Array.newInstance((new HashSet<String>()).getClass(), size);
newPartitions = new ArrayList<Partition<AbstractKinesisInputOperator>>(size);
for (int i = 0; i < shards.size(); i++) {
Shard pm = shards.get(i);
if (pIds[i % size] == null) {
pIds[i % size] = new HashSet<String>();
}
pIds[i % size].add(pm.getShardId());
}
if (isInitialParitition) {
lastRepartitionTime = System.currentTimeMillis();
logger.info("[MANY_TO_ONE]: Initializing partition(s)");
} else {
logger.info("[MANY_TO_ONE]: Add operator partition for kinesis partition(s): " + StringUtils.join(newWaitingPartition, ", ") + ", StreamName: " + this.getConsumer().streamName);
newWaitingPartition.clear();
}
// Add the existing partition Ids to the deleted operators
for (Partition<AbstractKinesisInputOperator> op : partitions) {
deletedOperators.add(op.getPartitionedInstance().operatorId);
}
for (int i = 0; i < pIds.length; i++) {
logger.info("[MANY_TO_ONE]: Create operator partition for kinesis partition(s): " + StringUtils.join(pIds[i], ", ") + ", StreamName: " + this.getConsumer().streamName);
if (pIds[i] != null) {
newPartitions.add(createPartition(pIds[i], initShardPos));
}
}
break;
default:
break;
}
int i = 0;
List<WindowDataManager> managers = windowDataManager.partition(partitions.size(), deletedOperators);
for (Partition<AbstractKinesisInputOperator> partition : partitions) {
partition.getPartitionedInstance().setWindowDataManager(managers.get(i++));
}
return newPartitions;
}
@Override
public Response processStats(BatchedOperatorStats stats)
{
Response resp = new Response();
List<KinesisConsumer.KinesisShardStats> kstats = extractkinesisStats(stats);
resp.repartitionRequired = isPartitionRequired(kstats);
return resp;
}
private void updateShardPositions(List<KinesisConsumer.KinesisShardStats> kstats)
{
//In every partition check interval, call shardmanager to update the positions
if (shardManager != null) {
shardManager.updatePositions(KinesisConsumer.KinesisShardStatsUtil.getShardStatsForPartitions(kstats));
}
}
private List<KinesisConsumer.KinesisShardStats> extractkinesisStats(BatchedOperatorStats stats)
{
//preprocess the stats
List<KinesisConsumer.KinesisShardStats> kmsList = new LinkedList<KinesisConsumer.KinesisShardStats>();
for (Stats.OperatorStats os : stats.getLastWindowedStats()) {
if (os != null && os.counters instanceof KinesisConsumer.KinesisShardStats) {
kmsList.add((KinesisConsumer.KinesisShardStats) os.counters);
}
}
return kmsList;
}
private boolean isPartitionRequired(List<KinesisConsumer.KinesisShardStats> kstats)
{
long t = System.currentTimeMillis();
if (t - lastCheckTime < repartitionCheckInterval) {
// return false if it's within repartitionCheckInterval since last time it check the stats
return false;
}
logger.debug("Use ShardManager to update the Shard Positions");
updateShardPositions(kstats);
if(repartitionInterval < 0){
// if repartition is disabled
return false;
}
if(t - lastRepartitionTime < repartitionInterval) {
// return false if it's still within repartitionInterval since last (re)partition
return false;
}
try {
// monitor if shards are repartitioned
Set<String> existingIds = new HashSet<String>();
for (PartitionInfo pio : currentPartitionInfo) {
existingIds.addAll(pio.kpids);
}
List<Shard> shards = KinesisUtil.getInstance().getShardList(getStreamName());
for (Shard shard :shards) {
if (!existingIds.contains(shard.getShardId())) {
newWaitingPartition.add(shard.getShardId());
}
}
if (newWaitingPartition.size() != 0) {
// found new kinesis partition
lastRepartitionTime = t;
return true;
}
return false;
} finally {
// update last check time
lastCheckTime = System.currentTimeMillis();
}
}
// If all the shards in the partition are closed, then remove that partition
private void removePartitionsForClosedShards(Collection<Partition<AbstractKinesisInputOperator>> partitions, Set<Integer> deletedOperators)
{
List<Partition<AbstractKinesisInputOperator>> closedPartitions = new ArrayList<Partition<AbstractKinesisInputOperator>>();
for(Partition<AbstractKinesisInputOperator> op : partitions)
{
if(op.getPartitionedInstance().getConsumer().getClosedShards().size() ==
op.getPartitionedInstance().getConsumer().getNumOfShards())
{
closedPartitions.add(op);
deletedOperators.add(op.getPartitionedInstance().operatorId);
}
}
if(closedPartitions.size() != 0)
{
for(Partition<AbstractKinesisInputOperator> op : closedPartitions)
{
partitions.remove(op);
}
}
}
// Get the list of open shards
private List<Shard> getOpenShards(Collection<Partition<AbstractKinesisInputOperator>> partitions)
{
List<Shard> closedShards = new ArrayList<Shard>();
for(Partition<AbstractKinesisInputOperator> op : partitions)
{
closedShards.addAll(op.getPartitionedInstance().getConsumer().getClosedShards());
}
List<Shard> shards = KinesisUtil.getInstance().getShardList(getStreamName());
List<Shard> openShards = new ArrayList<Shard>();
for (Shard shard :shards) {
if(!closedShards.contains(shard)) {
openShards.add(shard);
}
}
return openShards;
}
// Create a new partition with the shardIds and initial shard positions
  private Partition<AbstractKinesisInputOperator> createPartition(Set<String> shardIds, Map<String, String> initShardPos)
{
Partition<AbstractKinesisInputOperator> p = new DefaultPartition<AbstractKinesisInputOperator>(KryoCloneUtils.cloneObject(this));
p.getPartitionedInstance().getConsumer().setShardIds(shardIds);
p.getPartitionedInstance().getConsumer().resetShardPositions(initShardPos);
PartitionInfo pif = new PartitionInfo();
pif.kpids = shardIds;
currentPartitionInfo.add(pif);
return p;
}
/**
* Implement Component Interface.
*
* @param context
*/
@Override
public void setup(OperatorContext context)
{
this.context = context;
try {
KinesisUtil.getInstance().createKinesisClient(accessKey, secretKey, endPoint);
} catch(Exception e)
{
throw new RuntimeException(e);
}
consumer.create();
operatorId = context.getId();
windowDataManager.setup(context);
shardPosition.clear();
if (context.getValue(OperatorContext.ACTIVATION_WINDOW_ID) < windowDataManager.getLargestCompletedWindow()) {
isReplayState = true;
}
}
/**
* Implement Component Interface.
*/
@Override
public void teardown()
{
windowDataManager.teardown();
consumer.teardown();
}
/**
* Implement Operator Interface.
*/
@Override
public void beginWindow(long windowId)
{
emitCount = 0;
currentWindowId = windowId;
if (windowId <= windowDataManager.getLargestCompletedWindow()) {
replay(windowId);
}
}
protected void replay(long windowId)
{
try {
@SuppressWarnings("unchecked")
Map<String, KinesisPair<String, Integer>> recoveredData =
(Map<String, KinesisPair<String, Integer>>)windowDataManager.retrieve(windowId);
if (recoveredData == null) {
return;
}
for (Map.Entry<String, KinesisPair<String, Integer>> rc: recoveredData.entrySet()) {
logger.debug("Replaying the windowId: {}", windowId);
logger.debug("ShardId: " + rc.getKey() + " , Start Sequence Id: " + rc.getValue().getFirst() + " , No Of Records: " + rc.getValue().getSecond());
try {
List<Record> records = KinesisUtil.getInstance().getRecords(consumer.streamName, rc.getValue().getSecond(),
rc.getKey(), ShardIteratorType.AT_SEQUENCE_NUMBER, rc.getValue().getFirst());
for (Record record : records) {
outputPort.emit(getTuple(record));
shardPosition.put(rc.getKey(), record.getSequenceNumber());
}
} catch(Exception e)
{
throw new RuntimeException(e);
}
}
}
catch (IOException e) {
throw new RuntimeException("replay", e);
}
}
/**
* Implement Operator Interface.
*/
@Override
public void endWindow()
{
if (currentWindowId > windowDataManager.getLargestCompletedWindow()) {
context.setCounters(getConsumer().getConsumerStats(shardPosition));
try {
windowDataManager.save(currentWindowRecoveryState, currentWindowId);
}
catch (IOException e) {
throw new RuntimeException("saving recovery", e);
}
}
currentWindowRecoveryState.clear();
}
/**
* Implement ActivationListener Interface.
*/
@Override
public void activate(OperatorContext ctx)
{
if(isReplayState)
{
// If it is a replay state, don't start the consumer
return;
}
consumer.start();
}
@Override
public void committed(long windowId)
{
try {
windowDataManager.committed(windowId);
}
catch (IOException e) {
throw new RuntimeException("deleting state", e);
}
}
@Override
public void checkpointed(long windowId)
{
}
@Override
public void beforeCheckpoint(long windowId)
{
}
/**
* Implement ActivationListener Interface.
*/
@Override
public void deactivate()
{
consumer.stop();
}
/**
* Implement InputOperator Interface.
*/
@Override
public void emitTuples()
{
if (currentWindowId <= windowDataManager.getLargestCompletedWindow()) {
return;
}
int count = consumer.getQueueSize();
if(maxTuplesPerWindow > 0)
count = Math.min(count, maxTuplesPerWindow - emitCount);
for (int i = 0; i < count; i++) {
Pair<String, Record> data = consumer.pollRecord();
String shardId = data.getFirst();
String recordId = data.getSecond().getSequenceNumber();
T tuple = getTuple(data.getSecond());
outputPort.emit(tuple);
if(!currentWindowRecoveryState.containsKey(shardId))
{
currentWindowRecoveryState.put(shardId, new KinesisPair<String, Integer>(recordId, 1));
} else {
KinesisPair<String, Integer> second = currentWindowRecoveryState.get(shardId);
Integer noOfRecords = second.getSecond();
currentWindowRecoveryState.put(data.getFirst(), new KinesisPair<String, Integer>(second.getFirst(), noOfRecords+1));
}
shardPosition.put(shardId, recordId);
}
if(isReplayState)
{
isReplayState = false;
// Set the shard positions to the consumer
Map<String, String> statsData = new HashMap<String, String>(getConsumer().getShardPosition());
statsData.putAll(shardPosition);
getConsumer().resetShardPositions(statsData);
consumer.start();
}
emitCount += count;
}
public static enum PartitionStrategy
{
/**
* Each operator partition connect to only one kinesis Shard
*/
ONE_TO_ONE,
/**
* Each operator consumes from several Shards.
*/
MANY_TO_ONE
}
static class PartitionInfo
{
Set<String> kpids;
}
public void setConsumer(KinesisConsumer consumer)
{
this.consumer = consumer;
}
public KinesisConsumer getConsumer()
{
return consumer;
}
public String getStreamName()
{
return this.consumer.getStreamName();
}
public void setStreamName(String streamName)
{
this.consumer.setStreamName(streamName);
}
public int getMaxTuplesPerWindow()
{
return maxTuplesPerWindow;
}
public void setMaxTuplesPerWindow(int maxTuplesPerWindow)
{
this.maxTuplesPerWindow = maxTuplesPerWindow;
}
public PartitionStrategy getStrategy()
{
return strategy;
}
public void setStrategy(String policy)
{
this.strategy = PartitionStrategy.valueOf(policy.toUpperCase());
}
public OperatorContext getContext()
{
return context;
}
public void setContext(OperatorContext context)
{
this.context = context;
}
public ShardManager getShardManager()
{
return shardManager;
}
public void setShardManager(ShardManager shardManager)
{
this.shardManager = shardManager;
}
public long getRepartitionInterval()
{
return repartitionInterval;
}
public void setRepartitionInterval(long repartitionInterval)
{
this.repartitionInterval = repartitionInterval;
}
public long getRepartitionCheckInterval()
{
return repartitionCheckInterval;
}
public void setRepartitionCheckInterval(long repartitionCheckInterval)
{
this.repartitionCheckInterval = repartitionCheckInterval;
}
public Integer getShardsPerPartition()
{
return shardsPerPartition;
}
public void setShardsPerPartition(Integer shardsPerPartition)
{
this.shardsPerPartition = shardsPerPartition;
}
public int getInitialPartitionCount()
{
return initialPartitionCount;
}
public void setInitialPartitionCount(int initialPartitionCount)
{
this.initialPartitionCount = initialPartitionCount;
}
public void setInitialOffset(String initialOffset)
{
this.consumer.initialOffset = initialOffset;
}
public String getAccessKey()
{
return accessKey;
}
public void setAccessKey(String accessKey)
{
this.accessKey = accessKey;
}
public String getSecretKey()
{
return secretKey;
}
public void setSecretKey(String secretKey)
{
this.secretKey = secretKey;
}
public String getEndPoint()
{
return endPoint;
}
public void setEndPoint(String endPoint)
{
this.endPoint = endPoint;
}
public WindowDataManager getWindowDataManager()
{
return windowDataManager;
}
public void setWindowDataManager(WindowDataManager windowDataManager)
{
this.windowDataManager = windowDataManager;
}
} |
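As a rough illustration of how the abstract operator above might be specialised and configured, here is a hedged sketch. The subclass name, the UTF-8 payload assumption and all configuration values are placeholders, and the DAG wiring is omitted.

// Illustrative subclass: interprets each Kinesis record's payload as a UTF-8 string.
public class StringKinesisInputOperator extends AbstractKinesisInputOperator<String>
{
  @Override
  public String getTuple(Record rc)
  {
    java.nio.ByteBuffer data = rc.getData();
    byte[] bytes = new byte[data.remaining()];
    data.get(bytes);
    return new String(bytes, java.nio.charset.StandardCharsets.UTF_8);
  }
}

// Indicative configuration using the setters defined above (values are placeholders):
StringKinesisInputOperator input = new StringKinesisInputOperator();
input.setAccessKey("<accessKey>");
input.setSecretKey("<secretKey>");
input.setEndPoint("kinesis.us-east-1.amazonaws.com");
input.setStreamName("myStream");
input.setStrategy("MANY_TO_ONE");
input.setInitialPartitionCount(2);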
// java/com/lyghtningwither/honeyfunmods/entity/EntityLightningStaff.java
package com.lyghtningwither.honeyfunmods.entity;
import net.minecraft.block.BlockSand;
import net.minecraft.block.BlockStainedGlass;
import net.minecraft.entity.Entity;
import net.minecraft.entity.EntityLivingBase;
import net.minecraft.entity.effect.EntityLightningBolt;
import net.minecraft.entity.player.EntityPlayer;
import net.minecraft.entity.projectile.EntityFireball;
import net.minecraft.entity.projectile.ProjectileHelper;
import net.minecraft.init.Blocks;
import net.minecraft.item.EnumDyeColor;
import net.minecraft.nbt.NBTTagCompound;
import net.minecraft.nbt.NBTTagList;
import net.minecraft.util.DamageSource;
import net.minecraft.util.EnumParticleTypes;
import net.minecraft.util.math.BlockPos;
import net.minecraft.util.math.RayTraceResult;
import net.minecraft.world.World;
import net.minecraftforge.event.ForgeEventFactory;
public class EntityLightningStaff extends EntityFireball {
public double accelX, accelY, accelZ;
public EntityLivingBase shooter;
private int lightningStrikes;
public EntityLightningStaff(World worldIn) {
super(worldIn);
}
public EntityLightningStaff(World worldIn, EntityPlayer playerIn, int x, int y, int z, double accelX, double accelY, double accelZ) {
super(worldIn, x, y, z, accelX, accelY, accelZ);
}
public EntityLightningStaff(World worldIn, EntityPlayer playerIn, int accelX, int accelY, int accelZ) {
super(worldIn, playerIn, accelX, accelY, accelZ);
}
@Override
protected void entityInit() {
this.lightningStrikes = this.world.rand.nextInt(2) + 1;
}
@Override
protected boolean isFireballFiery() {
return false;
}
protected void onImpact(RayTraceResult trace) {
if(trace.typeOfHit == RayTraceResult.Type.BLOCK) {
EntityLightningBolt lightning = new EntityLightningBolt(this.world, this.posX, this.posY, this.posZ, false);
if(this.world.getBlockState(new BlockPos(posX, posY, posZ)) == Blocks.SAND.getDefaultState().withProperty(Blocks.SAND.VARIANT, BlockSand.EnumType.SAND)) {
this.world.setBlockState(new BlockPos(posX, posY, posZ), Blocks.GLASS.getDefaultState());
}
if(this.world.getBlockState(new BlockPos(posX, posY, posZ)) == Blocks.SAND.getDefaultState().withProperty(Blocks.SAND.VARIANT, BlockSand.EnumType.RED_SAND)) {
this.world.setBlockState(new BlockPos(posX, posY, posZ), Blocks.STAINED_GLASS.getDefaultState().withProperty(BlockStainedGlass.COLOR, EnumDyeColor.RED));
}
this.world.spawnEntity(lightning);
this.setDead();
}
else if(trace.typeOfHit == RayTraceResult.Type.ENTITY) {
EntityLightningBolt lightning = new EntityLightningBolt(this.world, this.posX, this.posY, this.posZ, false);
this.world.spawnEntity(lightning);
trace.entityHit.attackEntityFrom(DamageSource.GENERIC, this.rand.nextInt(2) + 1);
this.setDead();
}
}
@Override
public void readEntityFromNBT(NBTTagCompound compound) {
if(compound.hasKey("ticks_existed")) this.ticksExisted = compound.getInteger("ticks_existed");
if(compound.hasKey("lightning_strikes")) this.lightningStrikes = compound.getInteger("lightning_strikes");
if(compound.hasKey("direction")) {
NBTTagList list = compound.getTagList("direction", 6);
this.accelX = list.getDoubleAt(0);
this.accelY = list.getDoubleAt(1);
this.accelZ = list.getDoubleAt(2);
}
}
@Override
public void writeEntityToNBT(NBTTagCompound compound) {
compound.setInteger("ticks_existed", this.ticksExisted);
compound.setInteger("lightning_strikes", this.lightningStrikes);
compound.setTag("direction", this.newDoubleNBTList(this.accelX, this.accelY, this.accelZ));
}
}
|
[The following is an exact transcript of this podcast.]
If you’ve ever craved an ice-cold soda, you know that sometimes you’re just looking for something that tastes…fizzy. If that sounds odd, scientists have discovered that carbonation actually has a flavor. And that our taste buds can sense CO2.
Bubbly soft drinks tickle our tongues with their effervescence. But researchers got to wondering whether we can taste the carbonation. To find out, they studied mice whose taste cells had been turned off, one flavor at a time. So, one mouse couldn’t taste sweet things, another couldn’t taste bitter, a third couldn’t taste salt, and so on. And they found that mice lacking the cells that sense the taste sour no longer respond to CO2.
Probing further, they discovered that eliminating a single gene renders these mice blind, if you will, to the taste of carbonation. That gene encodes an enzyme that breaks down CO2—and water…don’t forget the water—into bicarbonate and protons. And it’s the protons—which are essentially acid—that the sour-sensitive cells seem to sense. The work appears in the journal Science.
The scientists speculate that our CO2 sensor evolved to help us avoid food that’s spoiled. Yet we still like some of our drinks to include the delightfully acidic tingle of a touch of CO2.
—Karen Hopkin |
// maybeMakeMessages recognizes when a dict is assigned to a message field or
// when a list or tuple of dicts or Nones is assigned to a repeated message
// field.
//
// It converts dicts or Nones to *Message of corresponding type using NewMessage
// and FromDict and returns them as Starlark values to use in place of passed
// value.
//
// Returns 'val' as is in other cases. Returns an error if given a dict, but
// it can't be used to initialize a message (e.g. has wrong schema).
func maybeMakeMessages(typ reflect.Type, val starlark.Value) (starlark.Value, error) {
if dict, ok := val.(*starlark.Dict); ok && isProtoType(typ) {
t, err := GetMessageType(typ)
if err != nil {
return nil, err
}
msg := NewMessage(t)
return msg, msg.FromDict(dict)
}
if seq, ok := val.(starlark.Sequence); ok && typ.Kind() == reflect.Slice && isProtoType(typ.Elem()) && shouldMakeMessages(seq) {
t, err := GetMessageType(typ.Elem())
if err != nil {
return nil, err
}
iter := seq.Iterate()
defer iter.Done()
out := make([]starlark.Value, 0, seq.Len())
var v starlark.Value
for iter.Next(&v) {
switch val := v.(type) {
case starlark.NoneType:
out = append(out, NewMessage(t))
case *starlark.Dict:
msg := NewMessage(t)
if err := msg.FromDict(val); err != nil {
return nil, err
}
out = append(out, msg)
default:
out = append(out, v)
}
}
return starlark.NewList(out), nil
}
return val, nil
} |
when in doubt, pull a bait and switch?
Pundits and commentators are still perplexed by Ken Ham and his temple of ignorance, and keep setting out on a snipe hunt for a deep, complicated reason why he created it.
For his article in Vanity Fair, British writer and critic A. A. Gill took a trip to Ken Ham’s grand temple to Biblical literalism and fundamentalist indoctrination: the Creation Museum. And needless to say, he’s not impressed by the heavy-handed preaching, the appeals to emotion in the exhibits, or the exhibits themselves. This, according to him, takes whatever inspiration the high-flying metaphors of the Bible could offer the faithful and once inspired amateur scientists to study the world around them in search of existential answers, and reduces it all to simplistic, bombastic proselytizing for the sake of proselytizing. Despite passionately insisting that they’re just better at interpreting the same volumes of scientific evidence studied in colleges and research labs across the world, everything the Creation Museum is about can be summarized in just one question.
After PZ Myers and a group of atheists and agnostics took a look at the museum for themselves, I tried to give an answer to this false dichotomy and that answer boiled down to only one thing. Creationists don’t just have dissenting opinions on the evidence for evolution and modern cosmology. They have a worldview they want to defend and whenever we challenge their ideas, the comfort they take in their assurance that they have almost everything in this universe figured out thanks to a manual from its creator is suddenly shaken. They hate that. They hate that very much.
For them science is only good when it can be either twisted to justify their views and opinions, or when it doesn’t concern anything they passionately guard from scientific inspection. Because the vulnerability they feel when their beliefs are questioned is so deep but they know that the word science has to be somewhere in the mix for anyone to take them seriously, they stage science fairs minus the actual science by mandating that participants discard any scientific concept contradicting the Bible in their entries. And it was this attempt to seem scientific without compromising their beliefs on which the Vanity Fair piece quickly focused when describing their museum itself.
One very interesting thing to Gill seems to be the fact that instead of incorporating religious elements into the museum’s appearance to those about to enter it, Ham chose a secular motif of a scientific institution, trying to somehow dress up his blatantly transparent intentions in a veneer of scientific credibility. This is Ham’s good, old trick of repeatedly using the word science and pretending he actually cares about it.
You can see this in a few of his publications which employ an astrophysicist who jumps from sound science to gibberish in just two sentences, and someone who claims to be a former space program engineer who converted into full blown fundamentalism after being swayed by “the evidence he saw in his long line of work” but doesn’t seem to have a grasp of even middle school level astronomy.
And what happens when you point out all their errors? Why they simply jam their fingers further and further down their ear canals and start screaming louder to tune out those nasty naturalists and skeptics. For example, take the creationist science teacher who decided to justify that the Bible is perfectly compatible with modern science on this blog, arguing that because they’re so compatible, he should have the right to turn his science class into Christian Theology 101. After reading a counter-argument for the examples he provided can you guess what his reply was?
It is obvious your ignorance of science is superceded by your ignorance of the Bible. I’ve gleamed from your comments that I need to be more fervent in my efforts to make positive [that] all students understand the evidence supporting the existence of God and His creation.
Even though he hadn’t made a single scientific assertion and actually got several things wrong about tectonic plate movements and the classification of cetaceans, I’m the ignorant one and it’s his duty to make sure that his view of the world is treated as gospel. There wasn’t even a single counter-argument or attempt to create a real exchange. Just an outraged insult and a promise to shout his arrogant ignorance louder to his students. And so it seems that the evidence for God is so powerful that students can’t be allowed to be taught only facts or be left alone to make up their own minds because they’ll choose wrong.
Unless of course “make up their own minds” is used as a euphemism that they’ll be indoctrinated into creationism and fed distortions of what evolutionary and cosmological theories entail by someone who lacks the requisite knowledge in those areas. This is the goal of our creationist teacher and Ken Ham. Their questions about evidence and pleas for open-minded inquiry are just cheap bait and switch. And believe it or not, so would be our insistence that religious tenets and science are always compatible and we never have to choose between one or the other…
/*! Abstract interface ... no data, only results.
Basically used to change the BlackVariance() methods to
    totalVariance. Also deals with lagged observations of an index
with a (usually different) availability lag.
*/
class YoYOptionletVolatilitySurface : public VolatilityTermStructure {
public:
YoYOptionletVolatilitySurface(Natural settlementDays,
const Calendar&,
BusinessDayConvention bdc,
const DayCounter& dc,
const Period& observationLag,
Frequency frequency,
bool indexIsInterpolated);
virtual ~YoYOptionletVolatilitySurface() {}
Volatility volatility(const Date& maturityDate,
Rate strike,
const Period &obsLag = Period(-1,Days),
bool extrapolate = false) const;
Volatility volatility(const Period& optionTenor,
Rate strike,
const Period &obsLag = Period(-1,Days),
bool extrapolate = false) const;
virtual Volatility totalVariance(const Date& exerciseDate,
Rate strike,
const Period &obsLag = Period(-1,Days),
bool extrapolate = false) const;
virtual Volatility totalVariance(const Period& optionTenor,
Rate strike,
const Period &obsLag = Period(-1,Days),
bool extrapolate = false) const;
virtual Period observationLag() const { return observationLag_; }
virtual Frequency frequency() const { return frequency_; }
virtual bool indexIsInterpolated() const { return indexIsInterpolated_; }
virtual Date baseDate() const;
virtual Time timeFromBase(const Date &date,
const Period& obsLag = Period(-1,Days)) const;
virtual Real minStrike() const = 0;
virtual Real maxStrike() const = 0;
virtual Volatility baseLevel() const {
QL_REQUIRE(baseLevel_ != Null<Volatility>(),
"Base volatility, for baseDate(), not set.");
return baseLevel_;
}
protected:
virtual void checkRange(const Date &, Rate strike, bool extrapolate) const;
virtual void checkRange(Time, Rate strike, bool extrapolate) const;
virtual Volatility volatilityImpl(Time length,
Rate strike) const = 0;
virtual void setBaseLevel(Volatility v) { baseLevel_ = v; }
mutable Volatility baseLevel_;
Period observationLag_;
Frequency frequency_;
bool indexIsInterpolated_;
};
def modify_url_for_impersonation(cls, url, impersonate_user, username):
    """Modify the SQLAlchemy URL so queries run as the impersonated user.

    The default implementation is a no-op; engine-specific subclasses are
    expected to override it when user impersonation is supported.
    """
    pass
-- src/Network/MoHWS/Server/Environment.hs
{- |
Copyright: 2006, <NAME>
Copyright: 2009, <NAME>
This is an extension of ServerContext,
which is used privately in the Server.
In addition to ServerContext it holds the module list,
which is not accessible by modules.
-}
module Network.MoHWS.Server.Environment where
import qualified Network.MoHWS.Server.Context as ServerContext
import qualified Network.MoHWS.Server.Options as Options
import qualified Network.MoHWS.Server.Request as ServerRequest
import qualified Network.MoHWS.Configuration as Config
import qualified Network.MoHWS.Module as Module
import qualified Network.MoHWS.Logger.Access as AccessLogger
import qualified Network.MoHWS.Logger.Error as ErrorLogger
import qualified Network.MoHWS.HTTP.MimeType as MimeType
import qualified Network.MoHWS.HTTP.Response as Response
import Control.Monad (foldM, msum, )
import Control.Monad.Trans.Maybe (MaybeT, runMaybeT, )
import Network.BSD (HostEntry, )
import Network.Socket (PortNumber, )
import System.Time (TimeDiff, )
data T body ext = Cons
{
context :: ServerContext.T ext,
port :: PortNumber,
modules :: [Module.T body]
}
-- * Read accessors
options :: T body ext -> Options.T
options = ServerContext.options . context
config :: T body ext -> Config.T ext
config = ServerContext.config . context
hostName :: T body ext -> HostEntry
hostName = ServerContext.hostName . context
mimeTypes :: T body ext -> MimeType.Dictionary
mimeTypes = ServerContext.mimeTypes . context
errorLogger :: T body ext -> ErrorLogger.Handle
errorLogger = ServerContext.errorLogger . context
accessLoggers :: T body ext -> [AccessLogger.Handle]
accessLoggers = ServerContext.accessLoggers . context
-- * Loggers
instance ErrorLogger.HasHandle (T body ext) where
getHandle = errorLogger
logAccess :: T body ext -> ServerRequest.T body -> Response.T body -> TimeDiff -> IO ()
logAccess = ServerContext.logAccess . context
-- * Modules
mapModules_ :: T body ext -> (Module.T body -> IO ()) -> IO ()
mapModules_ st f = mapM_ f (modules st)
foldModules :: T body ext -> (Module.T body -> a -> IO a) -> a -> IO a
foldModules st f x = foldM (flip f) x (modules st)
tryModules :: T body ext -> (Module.T body -> MaybeT IO a) -> IO (Maybe a)
tryModules st f = runMaybeT $ msum $ map f $ modules st
|
package sonia.webapp.diva.db;
import org.springframework.data.repository.PagingAndSortingRepository;
import org.springframework.stereotype.Repository;
/**
*
* @author <NAME> (<EMAIL>)
*/
@Repository
public interface DgcvRequestLogRepository extends
PagingAndSortingRepository<DgcvRequestLog, String>
{
}
|
/**
* This is a {@link net.sf.mmm.util.validation.api.ValueValidator} that
* {@link net.sf.mmm.util.validation.api.ValueValidator#validate(Object, Object) validates} that a date lies in the
* past.
*
* @param <V> is the generic type of the value to {@link #validate(Object) validate}.
*
* @author Joerg Hohwiller (hohwille at users.sourceforge.net)
* @since 8.5.0
*/
public abstract class ValidatorTimePast<V> extends AbstractValueValidator<V> {
/** @see #getCode() */
public static final String CODE = "Past";
/**
* The constructor.
*/
public ValidatorTimePast() {
super();
}
@Override
protected String getCode() {
return CODE;
}
@Override
protected NlsMessage validateNotNull(V value) {
if (isPast(value)) {
return null;
}
return createBundle(NlsBundleUtilValidationRoot.class).errorValueNotInPast(value);
}
/**
* @param value the date to check.
   * @return {@code true} if the value lies in the past, {@code false} otherwise.
*/
protected abstract boolean isPast(V value);
@Override
public int hashCode() {
return 2424;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
return true;
}
} |
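A concrete subclass is needed to use this validator. The following is a minimal sketch for java.util.Date, assuming only that "past" means strictly before the current system time; it is not part of the class above.

// Illustrative subclass of ValidatorTimePast for java.util.Date (assumption:
// "past" is evaluated against System.currentTimeMillis()).
public class ValidatorDatePast extends ValidatorTimePast<java.util.Date> {

  @Override
  protected boolean isPast(java.util.Date value) {
    return value.getTime() < System.currentTimeMillis();
  }
}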
def toMatrix(self):
twoxx = 2 * self.x * self.x
twoyy = 2 * self.y * self.y
twozz = 2 * self.z * self.z
twowx = 2 * self.w * self.x
twowy = 2 * self.w * self.y
twowz = 2 * self.w * self.z
twoxy = 2 * self.x * self.y
twoxz = 2 * self.x * self.z
twoyz = 2 * self.y * self.z
return Matrix33([[1 - twoyy - twozz, twoxy - twowz, twoxz + twowy],
[twoxy + twowz, 1 - twoxx - twozz, twoyz - twowx],
[twoxz - twowy, twoyz + twowx, 1 - twoxx - twoyy]]) |
import type { FC } from 'react';
import { SvgIcon } from './style';
export const CalendarIcon: FC = () => (
<SvgIcon version="1.1" x="0px" y="0px" viewBox="0 0 512 512">
<g>
<path
d="M452,40h-24V0h-40v40H124V0H84v40H60C26.916,40,0,66.916,0,100v352c0,33.084,26.916,60,60,60h392
c33.084,0,60-26.916,60-60V100C512,66.916,485.084,40,452,40z M472,452c0,11.028-8.972,20-20,20H60c-11.028,0-20-8.972-20-20V188
h432V452z M472,148H40v-48c0-11.028,8.972-20,20-20h24v40h40V80h264v40h40V80h24c11.028,0,20,8.972,20,20V148z"
/>
</g>
<g>
<rect x="76" y="230" width="40" height="40" />
</g>
<g>
<rect x="156" y="230" width="40" height="40" />
</g>
<g>
<rect x="236" y="230" width="40" height="40" />
</g>
<g>
<rect x="316" y="230" width="40" height="40" />
</g>
<g>
<rect x="396" y="230" width="40" height="40" />
</g>
<g>
<rect x="76" y="310" width="40" height="40" />
</g>
<g>
<rect x="156" y="310" width="40" height="40" />
</g>
<g>
<rect x="236" y="310" width="40" height="40" />
</g>
<g>
<rect x="316" y="310" width="40" height="40" />
</g>
<g>
<rect x="76" y="390" width="40" height="40" />
</g>
<g>
<rect x="156" y="390" width="40" height="40" />
</g>
<g>
<rect x="236" y="390" width="40" height="40" />
</g>
<g>
<rect x="316" y="390" width="40" height="40" />
</g>
<g>
<rect x="396" y="310" width="40" height="40" />
</g>
</SvgIcon>
);
|
/**
 * Default conflict resolver. Implements the closer-newer-first policy by default, but can be configured via Plexus.
*
* @author <a href="mailto:[email protected]">Oleg Gusakov</a>
* @version $Id: DefaultGraphConflictResolver.java 958295 2010-06-26 23:16:18Z hboutemy $
*/
@Component( role = GraphConflictResolver.class )
public class DefaultGraphConflictResolver
implements GraphConflictResolver
{
/**
     * The artifact closer to the entry point is selected.
*/
@Requirement( role = GraphConflictResolutionPolicy.class )
protected GraphConflictResolutionPolicy policy;
// -------------------------------------------------------------------------------------
// -------------------------------------------------------------------------------------
public MetadataGraph resolveConflicts( MetadataGraph graph, ArtifactScopeEnum scope )
throws GraphConflictResolutionException
{
if ( policy == null )
{
throw new GraphConflictResolutionException( "no GraphConflictResolutionPolicy injected" );
}
if ( graph == null )
{
return null;
}
final MetadataGraphVertex entry = graph.getEntry();
if ( entry == null )
{
return null;
}
if ( graph.isEmpty() )
{
throw new GraphConflictResolutionException( "graph with an entry, but not vertices do not exist" );
}
if ( graph.isEmptyEdges() )
{
return null; // no edges - nothing to worry about
}
final TreeSet<MetadataGraphVertex> vertices = graph.getVertices();
try
{
// edge case - single vertex graph
if ( vertices.size() == 1 )
{
return new MetadataGraph( entry );
}
final ArtifactScopeEnum requestedScope = ArtifactScopeEnum.checkScope( scope );
MetadataGraph res = new MetadataGraph( vertices.size() );
res.setVersionedVertices( false );
res.setScopedVertices( false );
MetadataGraphVertex resEntry = res.addVertex( entry.getMd() );
res.setEntry( resEntry );
res.setScope( requestedScope );
for ( MetadataGraphVertex v : vertices )
{
final List<MetadataGraphEdge> ins = graph.getIncidentEdges( v );
final MetadataGraphEdge edge = cleanEdges( v, ins, requestedScope );
if ( edge == null )
{ // no edges - don't need this vertex any more
if ( entry.equals( v ) )
{ // unless it's an entry point.
// currently processing the entry point - it should not have any entry incident edges
res.getEntry().getMd().setWhy( "This is a graph entry point. No links." );
}
else
{
// System.out.println("--->"+v.getMd().toDomainString()
// +" has been terminated on this entry set\n-------------------\n"
// +ins
// +"\n-------------------\n"
// );
}
}
else
{
// System.out.println("+++>"+v.getMd().toDomainString()+" still has "+edge.toString() );
// fill in domain md with actual version data
ArtifactMetadata md = v.getMd();
ArtifactMetadata newMd =
new ArtifactMetadata( md.getGroupId(), md.getArtifactId(), edge.getVersion(), md.getType(),
md.getScopeAsEnum(), md.getClassifier(), edge.getArtifactUri(),
edge.getSource() == null ? "" : edge.getSource().getMd().toString(),
edge.isResolved(), edge.getTarget() == null ? null
: edge.getTarget().getMd().getError() );
MetadataGraphVertex newV = res.addVertex( newMd );
MetadataGraphVertex sourceV = res.addVertex( edge.getSource().getMd() );
res.addEdge( sourceV, newV, edge );
}
}
MetadataGraph linkedRes = findLinkedSubgraph( res );
// System.err.println("Original graph("+graph.getVertices().size()+"):\n"+graph.toString());
// System.err.println("Cleaned("+requestedScope+") graph("+res.getVertices().size()+"):\n"+res.toString());
// System.err.println("Linked("+requestedScope+")
// subgraph("+linkedRes.getVertices().size()+"):\n"+linkedRes.toString());
return linkedRes;
}
catch ( MetadataResolutionException e )
{
throw new GraphConflictResolutionException( e );
}
}
// -------------------------------------------------------------------------------------
private MetadataGraph findLinkedSubgraph( MetadataGraph g )
{
if ( g.getVertices().size() == 1 )
{
return g;
}
List<MetadataGraphVertex> visited = new ArrayList<MetadataGraphVertex>( g.getVertices().size() );
visit( g.getEntry(), visited, g );
List<MetadataGraphVertex> dropList = new ArrayList<MetadataGraphVertex>( g.getVertices().size() );
// collect drop list
for ( MetadataGraphVertex v : g.getVertices() )
{
if ( !visited.contains( v ) )
{
dropList.add( v );
}
}
if ( dropList.size() < 1 )
{
return g;
}
// now - drop vertices
TreeSet<MetadataGraphVertex> vertices = g.getVertices();
for ( MetadataGraphVertex v : dropList )
{
vertices.remove( v );
}
return g;
}
// -------------------------------------------------------------------------------------
private void visit( MetadataGraphVertex from, List<MetadataGraphVertex> visited, MetadataGraph graph )
{
if ( visited.contains( from ) )
{
return;
}
visited.add( from );
List<MetadataGraphEdge> exitList = graph.getExcidentEdges( from );
// String s = "|---> "+from.getMd().toString()+" - "+(exitList == null ? -1 : exitList.size()) + " exit links";
if ( exitList != null && exitList.size() > 0 )
{
for ( MetadataGraphEdge e : graph.getExcidentEdges( from ) )
{
visit( e.getTarget(), visited, graph );
}
}
}
// -------------------------------------------------------------------------------------
private MetadataGraphEdge cleanEdges( MetadataGraphVertex v, List<MetadataGraphEdge> edges,
ArtifactScopeEnum scope )
{
if ( edges == null || edges.isEmpty() )
{
return null;
}
if ( edges.size() == 1 )
{
MetadataGraphEdge e = edges.get( 0 );
if ( scope.encloses( e.getScope() ) )
{
return e;
}
return null;
}
MetadataGraphEdge res = null;
for ( MetadataGraphEdge e : edges )
{
if ( !scope.encloses( e.getScope() ) )
{
continue;
}
if ( res == null )
{
res = e;
}
else
{
res = policy.apply( e, res );
}
}
return res;
}
// -------------------------------------------------------------------------------------
// -------------------------------------------------------------------------------------
} |
Niagara Falls 35th Annual Winter Festival of Lights is Back!
It’s back!
Canada’s largest lights festival is officially celebrating its 35th anniversary in Niagara Falls this year for what organizers are claiming to be its brightest season yet.
The Ontario Power Generation Winter Festival of Lights transforms Niagara Falls into a colourful wonderland, with millions of sparkling lights and animated displays along an 8 km illumination route that travels through the beautiful landscapes of the Niagara Parks, Dufferin Islands and the surrounding tourist districts.
In fact, the Winter Festival of Lights (WFOL) attracts more than one million people from around the world each year, who come to see more than two million lights displayed throughout Niagara Falls.
The “illuminations include fifteen Canadian Wildlife displays, the Sylma and over fifty trees wrapped in lights in Dufferin Islands, the world’s largest Canadian-American Flag, the light show on the Toronto Power Generating Station, the two-storey tall Zimmerman Fountain, three-dimensional Angels at the Niagara Parks Police Building and a visitor favourite, Noah’s Ark!”
There is also free programming that includes the Fallsview Sound & Light Show at the Oakes Hotel, Laser Light Shows at the Top of Clifton Hill and WFOL Opening Ceremonies in Niagara Parks Queen Victoria Park.
In addition to its illuminations and displays, the Winter Festival of Lights also hosts several events each season, including Opening Ceremonies in Queen Victoria Park, Laser Light Shows, weekly fireworks presented by Fallsview Casino, the Fallsview Sound & Light Show at the Oakes Hotel, the Deck the Falls Holiday Walking Tour, nightly Falls Illumination, Festival of Stars Concerts, the Sparkle Lighting Awards, the Niagara Falls New Year's Eve Concert and the Niagara Falls Icewine Festival.
The 2017 festival officially kicks off on November 18 and will run until January 31, 2018. Admission to the festival and its lighting displays is free, but organizers suggest donations of $5 to $10 per person to help cover costs.
Not sure what to expect? This video (below) should be a good pre-tour guide!
About Brüha :
Brüha is a local entertainment discovery & ticketing provider changing the way people interact with their local community, discover events and purchase tickets. Buy and sell tickets for upcoming events using Brüha. Whether you are a tourist visiting a new city or a local resident, Brüha provides a one-stop-shop that allows you to stay connected to your city.
Looking to increase awareness for your Venue, Organization, or your next Event? Head over to our website and get started today by creating your first listing. |
/**
*
* @author Radek Matous, Jirka Rechtacek
*/
public class TestUtils {
private static UpdateItem item = null;
private static ModuleManager mgr = null;
public static void setUserDir(String path) {
System.setProperty ("netbeans.user", path);
}
/** Returns the platform installation directory.
* @return the File directory.
*/
public static File getPlatformDir () {
return new File (System.getProperty ("netbeans.home")); // NOI18N
}
public static void setPlatformDir (String path) {
System.setProperty ("netbeans.home", path);
}
public static void testInit() {
mgr = Main.getModuleSystem().getManager();
assert mgr != null;
}
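/** Copies the given resource into a uniquely named .jar file inside the test's working directory. */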
public static File getFile(NbTestCase t, URL res) throws IOException {
File create;
String name = res.getFile().replaceAll(".*/", "").replaceAll(".jar$", "");
for (int i = 0; ; i++) {
String add = i == 0 ? ".jar" : i + ".jar";
create = new File(t.getWorkDir(), name + add);
if (!create.exists()) {
break;
}
}
FileOutputStream os = new FileOutputStream(create);
FileUtil.copy(res.openStream(), os);
os.close();
return create;
}
public static class CustomItemsProvider implements UpdateProvider {
public String getName() {
return "items-with-custom-installer";
}
public String getDisplayName() {
return "Provides item with own custom installer";
}
public String getDescription () {
return null;
}
public Map<String, UpdateItem> getUpdateItems() {
return Collections.singletonMap ("hello-installer", getUpdateItemWithCustomInstaller ());
}
public boolean refresh(boolean force) {
return true;
}
public CATEGORY getCategory() {
return CATEGORY.COMMUNITY;
}
}
private static CustomInstaller customInstaller = new CustomInstaller () {
public boolean install (String codeName, String specificationVersion, ProgressHandle handle) throws OperationException {
assert false : "Don't call unset installer";
return false;
}
};
public static void setCustomInstaller (CustomInstaller installer) {
customInstaller = installer;
}
public static UpdateItem getUpdateItemWithCustomInstaller () {
if (item != null) return item;
String codeName = "hello-installer";
String specificationVersion = "0.1";
String displayName = "Hello Component";
String description = "Hello I'm a component with own installer";
URL distribution = null;
try {
distribution = new URL ("nbresloc:/org/netbeans/api/autoupdate/data/org-yourorghere-engine-1-1.nbm");
//distribution = new URL ("nbresloc:/org/netbeans/api/autoupdate/data/executable-jar.jar");
} catch (MalformedURLException ex) {
assert false : ex;
}
String author = "Jiri Rechtacek";
String downloadSize = "2815";
String homepage = "http://netbeans.de";
Manifest manifest = new Manifest ();
Attributes mfAttrs = manifest.getMainAttributes ();
CustomInstaller ci = createCustomInstaller ();
assert ci != null;
UpdateLicense license = UpdateLicense.createUpdateLicense ("none-license", "no-license");
item = UpdateItem.createNativeComponent (
codeName,
specificationVersion,
downloadSize,
null, // dependencies
displayName,
description,
false, false, "my-cluster",
ci,
license);
return item;
}
private static CustomInstaller createCustomInstaller () {
return new CustomInstaller () {
public boolean install (String codeName, String specificationVersion, ProgressHandle handle) throws OperationException {
assert item != null;
return customInstaller.install (codeName, specificationVersion, handle);
}
};
}
} |
package org.firstinspires.ftc.teamcode.drive.userOpModes.robo7u.TeleOps.AutomatedTeleOps;
public class AutoAimTester {
}
|
// read from the given confsDir to generate a WorkflowConf object
public static WorkflowConf readWfConf(FileSystem fs,
Path confsDir)
throws IOException, FileNotFoundException {
Path inFile = new Path(confsDir, confFileName);
if (!fs.exists(inFile)) {
throw new IOException("WorkflowConf file does not exist in "
+ inFile.toString() + " when attempting"
+ " to read.");
}
if (!fs.isFile(inFile)) {
throw new IOException(inFile.toString() + " is not a file!");
}
FSDataInputStream in = fs.open(inFile);
WorkflowConf wf = new WorkflowConf();
wf.readFields(in);
in.close();
return wf;
} |
/**
* Returns true if any corner of the given component lies within this selection
*
* @param component
* @return boolean
*/
public boolean contains(Component component) {
int x = component.getX() - getX();
int y = component.getY() - getY();
return contains(x, y)
|| contains(x + component.getWidth(), y)
|| contains(x, y + component.getHeight())
|| contains(x + component.getWidth(), y + component.getHeight());
} |
PD Ahn Joon Young, who produced Mnet’s hit survival show “Produce 101 Season 2,” recently sat down for an interview, where he talked about a variety of topics.
Despite stating that he wouldn’t do “devil’s editing” during the show’s press conference, many viewers have complained saying that the PD incorporated it in every episode. Some even started to call him “Cancer Joon Young” (the Korean word for “cancer” sounds very similar to the PD’s last name). During his recent interview, the PD stated, “The nickname ‘Cancer Joon Young’…I guess to those people my existence is similar to that of cancer cells. There was no distorted editing and the term ‘devil’s editing’ in itself is wrong.”
He continued to explain, saying, “When I took on ‘Produce 101 Season 2,’ the CEOs of each agency told me not to do it. They said that regardless of whether the show did well or not, I would get cursed out. At that time, I thought ‘If it does well, I won’t get hate for it.’ I think it happened because there are a lot of people who love the 101 trainees, right? There’s nothing that I want to apologize for.
“The term ‘devil’s editing’ was once meant to describe a type of editing where viewers couldn’t take their eyes off their screens. But now the meaning of the term is changing. If I did ‘devil’s editing’ I don’t think I would be able to look the trainees properly in the eyes. The trainees had no problems with the editing, and they told me it was fun. It’s strange to even say that I did ‘devil’s editing,’ and it’s upsetting that I have to talk about this.”
The PD also mentioned how the trainees had to deal with malicious comments. “Whenever the trainees couldn’t really focus and I asked them why, they would say, ‘I couldn’t sleep because I was reading malicious comments last night.’ I told them not to look at them, even if it was still a form of interest. Each of the agencies also asked us to create an environment where the trainees could focus. Some agencies even took away their phones. From the production team’s perspective it was sad to see. They’re very bright kids but all of a sudden they started to become more timid. But those trainees would read the comments about me and worry for me, saying that they became comforted by looking at me.”
Meanwhile, 11 trainees from “Produce 101 Season 2” will be debuting as Wanna One.
Source (1) |
import { NgModule } from '@angular/core';
import { CommonModule } from '@angular/common';
import { ReactiveFormsModule } from '@angular/forms';
import { RouterModule } from '@angular/router';
// import { DragulaModule } from 'ng2-dragula';
import { MatCardModule, MatFormFieldModule, MatInputModule, MatButtonModule } from '@angular/material';
import { DragDropModule } from '@angular/cdk/drag-drop';
import { SharedModule } from 'app/shared/shared.module';
import { ScoreSystemModule } from '../score-system/score-system.module';
import { DisciplinesComponent } from './disciplines.component';
import { DisciplineEditorComponent } from './discipline-editor/discipline-editor.component';
@NgModule({
imports: [
CommonModule,
ReactiveFormsModule,
RouterModule,
// DragulaModule.forRoot(),
ScoreSystemModule,
DragDropModule,
MatCardModule,
MatFormFieldModule,
MatInputModule,
MatButtonModule,
SharedModule,
],
declarations: [
DisciplinesComponent, DisciplineEditorComponent
],
exports: [
DisciplinesComponent, DisciplineEditorComponent,
]
})
export class DisciplinesModule { }
|
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2010-2014 Intel Corporation
*/
#include <inttypes.h>
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>
#include <sys/queue.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_common.h>
#include "eal_private.h"
#include "eal_internal_cfg.h"
#include "eal_memalloc.h"
#include "malloc_elem.h"
#include "malloc_heap.h"
/*
* If debugging is enabled, freed memory is set to poison value
* to catch buggy programs. Otherwise, freed memory is set to zero
* to avoid having to zero in zmalloc
*/
#ifdef RTE_MALLOC_DEBUG
#define MALLOC_POISON 0x6b
#else
#define MALLOC_POISON 0
#endif
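/*
 * Find the length of the largest IOVA-contiguous span of data that can be
 * carved out of this element at the requested alignment.
 */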
size_t
malloc_elem_find_max_iova_contig(struct malloc_elem *elem, size_t align)
{
void *cur_page, *contig_seg_start, *page_end, *cur_seg_end;
void *data_start, *data_end;
rte_iova_t expected_iova;
struct rte_memseg *ms;
size_t page_sz, cur, max;
const struct internal_config *internal_conf =
eal_get_internal_configuration();
page_sz = (size_t)elem->msl->page_sz;
data_start = RTE_PTR_ADD(elem, MALLOC_ELEM_HEADER_LEN);
data_end = RTE_PTR_ADD(elem, elem->size - MALLOC_ELEM_TRAILER_LEN);
/* segment must start after header and with specified alignment */
contig_seg_start = RTE_PTR_ALIGN_CEIL(data_start, align);
/* return if aligned address is already out of malloc element */
if (contig_seg_start > data_end)
return 0;
/* if we're in IOVA as VA mode, or if we're in legacy mode with
* hugepages, all elements are IOVA-contiguous. however, we can only
* make these assumptions about internal memory - externally allocated
* segments have to be checked.
*/
if (!elem->msl->external &&
(rte_eal_iova_mode() == RTE_IOVA_VA ||
(internal_conf->legacy_mem &&
rte_eal_has_hugepages())))
return RTE_PTR_DIFF(data_end, contig_seg_start);
cur_page = RTE_PTR_ALIGN_FLOOR(contig_seg_start, page_sz);
ms = rte_mem_virt2memseg(cur_page, elem->msl);
/* do first iteration outside the loop */
page_end = RTE_PTR_ADD(cur_page, page_sz);
cur_seg_end = RTE_MIN(page_end, data_end);
cur = RTE_PTR_DIFF(cur_seg_end, contig_seg_start) -
MALLOC_ELEM_TRAILER_LEN;
max = cur;
expected_iova = ms->iova + page_sz;
/* memsegs are contiguous in memory */
ms++;
cur_page = RTE_PTR_ADD(cur_page, page_sz);
while (cur_page < data_end) {
page_end = RTE_PTR_ADD(cur_page, page_sz);
cur_seg_end = RTE_MIN(page_end, data_end);
/* reset start of contiguous segment if unexpected iova */
if (ms->iova != expected_iova) {
/* next contiguous segment must start at specified
* alignment.
*/
contig_seg_start = RTE_PTR_ALIGN(cur_page, align);
/* new segment start may be on a different page, so find
* the page and skip to next iteration to make sure
* we're not blowing past data end.
*/
ms = rte_mem_virt2memseg(contig_seg_start, elem->msl);
cur_page = ms->addr;
/* don't trigger another recalculation */
expected_iova = ms->iova;
continue;
}
/* cur_seg_end ends on a page boundary or on data end. if we're
* looking at data end, then malloc trailer is already included
* in the calculations. if we're looking at page end, then we
* know there's more data past this page and thus there's space
* for malloc element trailer, so don't count it here.
*/
cur = RTE_PTR_DIFF(cur_seg_end, contig_seg_start);
/* update max if cur value is bigger */
if (cur > max)
max = cur;
/* move to next page */
cur_page = page_end;
expected_iova = ms->iova + page_sz;
/* memsegs are contiguous in memory */
ms++;
}
return max;
}
/*
* Initialize a general malloc_elem header structure
*/
void
malloc_elem_init(struct malloc_elem *elem, struct malloc_heap *heap,
struct rte_memseg_list *msl, size_t size,
struct malloc_elem *orig_elem, size_t orig_size, bool dirty)
{
elem->heap = heap;
elem->msl = msl;
elem->prev = NULL;
elem->next = NULL;
memset(&elem->free_list, 0, sizeof(elem->free_list));
elem->state = ELEM_FREE;
elem->dirty = dirty;
elem->size = size;
elem->pad = 0;
elem->orig_elem = orig_elem;
elem->orig_size = orig_size;
set_header(elem);
set_trailer(elem);
}
void
malloc_elem_insert(struct malloc_elem *elem)
{
struct malloc_elem *prev_elem, *next_elem;
struct malloc_heap *heap = elem->heap;
/* first and last elements must be both NULL or both non-NULL */
if ((heap->first == NULL) != (heap->last == NULL)) {
RTE_LOG(ERR, EAL, "Heap is probably corrupt\n");
return;
}
if (heap->first == NULL && heap->last == NULL) {
/* if empty heap */
heap->first = elem;
heap->last = elem;
prev_elem = NULL;
next_elem = NULL;
} else if (elem < heap->first) {
/* if lower than start */
prev_elem = NULL;
next_elem = heap->first;
heap->first = elem;
} else if (elem > heap->last) {
/* if higher than end */
prev_elem = heap->last;
next_elem = NULL;
heap->last = elem;
} else {
/* the new memory is somewhere between start and end */
uint64_t dist_from_start, dist_from_end;
dist_from_end = RTE_PTR_DIFF(heap->last, elem);
dist_from_start = RTE_PTR_DIFF(elem, heap->first);
/* check which is closer, and find closest list entries */
if (dist_from_start < dist_from_end) {
prev_elem = heap->first;
while (prev_elem->next < elem)
prev_elem = prev_elem->next;
next_elem = prev_elem->next;
} else {
next_elem = heap->last;
while (next_elem->prev > elem)
next_elem = next_elem->prev;
prev_elem = next_elem->prev;
}
}
/* insert new element */
elem->prev = prev_elem;
elem->next = next_elem;
if (prev_elem)
prev_elem->next = elem;
if (next_elem)
next_elem->prev = elem;
}
/*
* Attempt to find enough physically contiguous memory in this block to store
* our data. Assume that element has at least enough space to fit in the data,
* so we just check the page addresses.
*/
static bool
elem_check_phys_contig(const struct rte_memseg_list *msl,
void *start, size_t size)
{
return eal_memalloc_is_contig(msl, start, size);
}
/*
* calculate the starting point of where data of the requested size
* and alignment would fit in the current element. If the data doesn't
* fit, return NULL.
*/
static void *
elem_start_pt(struct malloc_elem *elem, size_t size, unsigned align,
size_t bound, bool contig)
{
size_t elem_size = elem->size;
/*
* we're allocating from the end, so adjust the size of element by
* alignment size.
*/
while (elem_size >= size) {
const size_t bmask = ~(bound - 1);
uintptr_t end_pt = (uintptr_t)elem +
elem_size - MALLOC_ELEM_TRAILER_LEN;
uintptr_t new_data_start = RTE_ALIGN_FLOOR((end_pt - size),
align);
uintptr_t new_elem_start;
/* check boundary */
if ((new_data_start & bmask) != ((end_pt - 1) & bmask)) {
end_pt = RTE_ALIGN_FLOOR(end_pt, bound);
new_data_start = RTE_ALIGN_FLOOR((end_pt - size),
align);
end_pt = new_data_start + size;
if (((end_pt - 1) & bmask) != (new_data_start & bmask))
return NULL;
}
new_elem_start = new_data_start - MALLOC_ELEM_HEADER_LEN;
/* if the new start point is before the existing start,
* it won't fit
*/
if (new_elem_start < (uintptr_t)elem)
return NULL;
if (contig) {
size_t new_data_size = end_pt - new_data_start;
/*
* if physical contiguousness was requested and we
* couldn't fit all data into one physically contiguous
* block, try again with lower addresses.
*/
if (!elem_check_phys_contig(elem->msl,
(void *)new_data_start,
new_data_size)) {
elem_size -= align;
continue;
}
}
return (void *)new_elem_start;
}
return NULL;
}
/*
* use elem_start_pt to determine if we can meet the size and
* alignment request from the current element
*/
int
malloc_elem_can_hold(struct malloc_elem *elem, size_t size, unsigned align,
size_t bound, bool contig)
{
return elem_start_pt(elem, size, align, bound, contig) != NULL;
}
/*
* split an existing element into two smaller elements at the given
* split_pt parameter.
*/
static void
split_elem(struct malloc_elem *elem, struct malloc_elem *split_pt)
{
struct malloc_elem *next_elem = elem->next;
const size_t old_elem_size = (uintptr_t)split_pt - (uintptr_t)elem;
const size_t new_elem_size = elem->size - old_elem_size;
malloc_elem_init(split_pt, elem->heap, elem->msl, new_elem_size,
elem->orig_elem, elem->orig_size, elem->dirty);
split_pt->prev = elem;
split_pt->next = next_elem;
if (next_elem)
next_elem->prev = split_pt;
else
elem->heap->last = split_pt;
elem->next = split_pt;
elem->size = old_elem_size;
set_trailer(elem);
if (elem->pad) {
/* Update the size of the inner element behind the padding. */
elem = RTE_PTR_ADD(elem, elem->pad);
elem->size = old_elem_size - elem->pad;
}
}
/*
* our malloc heap is a doubly linked list, so doubly remove our element.
*/
static void __rte_unused
remove_elem(struct malloc_elem *elem)
{
struct malloc_elem *next, *prev;
next = elem->next;
prev = elem->prev;
if (next)
next->prev = prev;
else
elem->heap->last = prev;
if (prev)
prev->next = next;
else
elem->heap->first = next;
elem->prev = NULL;
elem->next = NULL;
}
static int
next_elem_is_adjacent(struct malloc_elem *elem)
{
const struct internal_config *internal_conf =
eal_get_internal_configuration();
return elem->next == RTE_PTR_ADD(elem, elem->size) &&
elem->next->msl == elem->msl &&
(!internal_conf->match_allocations ||
elem->orig_elem == elem->next->orig_elem);
}
static int
prev_elem_is_adjacent(struct malloc_elem *elem)
{
const struct internal_config *internal_conf =
eal_get_internal_configuration();
return elem == RTE_PTR_ADD(elem->prev, elem->prev->size) &&
elem->prev->msl == elem->msl &&
(!internal_conf->match_allocations ||
elem->orig_elem == elem->prev->orig_elem);
}
/*
* Given an element size, compute its freelist index.
* We free an element into the freelist containing similarly-sized elements.
* We try to allocate elements starting with the freelist containing
* similarly-sized elements, and if necessary, we search freelists
* containing larger elements.
*
* Example element size ranges for a heap with five free lists:
* heap->free_head[0] - (0 , 2^8)
* heap->free_head[1] - [2^8 , 2^10)
* heap->free_head[2] - [2^10 ,2^12)
* heap->free_head[3] - [2^12, 2^14)
* heap->free_head[4] - [2^14, MAX_SIZE]
*/
size_t
malloc_elem_free_list_index(size_t size)
{
#define MALLOC_MINSIZE_LOG2 8
#define MALLOC_LOG2_INCREMENT 2
size_t log2;
size_t index;
if (size < (1UL << MALLOC_MINSIZE_LOG2))
return 0;
/* Find next power of 2 > size. */
log2 = sizeof(size) * 8 - __builtin_clzl(size);
/* Compute freelist index, based on log2(size). */
index = (log2 - MALLOC_MINSIZE_LOG2 + MALLOC_LOG2_INCREMENT - 1) /
MALLOC_LOG2_INCREMENT;
return index <= RTE_HEAP_NUM_FREELISTS - 1 ?
index : RTE_HEAP_NUM_FREELISTS - 1;
}
/*
* Add the specified element to its heap's free list.
*/
void
malloc_elem_free_list_insert(struct malloc_elem *elem)
{
size_t idx;
idx = malloc_elem_free_list_index(elem->size - MALLOC_ELEM_HEADER_LEN);
elem->state = ELEM_FREE;
LIST_INSERT_HEAD(&elem->heap->free_head[idx], elem, free_list);
}
/*
* Remove the specified element from its heap's free list.
*/
void
malloc_elem_free_list_remove(struct malloc_elem *elem)
{
LIST_REMOVE(elem, free_list);
}
/*
* reserve a block of data in an existing malloc_elem. If the malloc_elem
* is much larger than the data block requested, we split the element in two.
* This function is only called from malloc_heap_alloc so parameter checking
* is not done here, as it's done there previously.
*/
struct malloc_elem *
malloc_elem_alloc(struct malloc_elem *elem, size_t size, unsigned align,
size_t bound, bool contig)
{
struct malloc_elem *new_elem = elem_start_pt(elem, size, align, bound,
contig);
const size_t old_elem_size = (uintptr_t)new_elem - (uintptr_t)elem;
const size_t trailer_size = elem->size - old_elem_size - size -
MALLOC_ELEM_OVERHEAD;
malloc_elem_free_list_remove(elem);
if (trailer_size > MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE) {
/* split it, too much free space after elem */
struct malloc_elem *new_free_elem =
RTE_PTR_ADD(new_elem, size + MALLOC_ELEM_OVERHEAD);
asan_clear_split_alloczone(new_free_elem);
split_elem(elem, new_free_elem);
malloc_elem_free_list_insert(new_free_elem);
if (elem == elem->heap->last)
elem->heap->last = new_free_elem;
}
if (old_elem_size < MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE) {
/* don't split it, pad the element instead */
elem->state = ELEM_BUSY;
elem->pad = old_elem_size;
asan_clear_alloczone(elem);
/* put a dummy header in padding, to point to real element header */
if (elem->pad > 0) { /* pad will be at least 64-bytes, as everything
* is cache-line aligned */
new_elem->pad = elem->pad;
new_elem->state = ELEM_PAD;
new_elem->size = elem->size - elem->pad;
set_header(new_elem);
}
return new_elem;
}
asan_clear_split_alloczone(new_elem);
/* we are going to split the element in two. The original element
* remains free, and the new element is the one allocated.
* Re-insert original element, in case its new size makes it
* belong on a different list.
*/
split_elem(elem, new_elem);
asan_clear_alloczone(new_elem);
new_elem->state = ELEM_BUSY;
malloc_elem_free_list_insert(elem);
return new_elem;
}
/*
* join two struct malloc_elem together. elem1 and elem2 must
* be contiguous in memory.
*/
static inline void
join_elem(struct malloc_elem *elem1, struct malloc_elem *elem2)
{
struct malloc_elem *next = elem2->next;
elem1->size += elem2->size;
if (next)
next->prev = elem1;
else
elem1->heap->last = elem1;
elem1->next = next;
elem1->dirty |= elem2->dirty;
if (elem1->pad) {
struct malloc_elem *inner = RTE_PTR_ADD(elem1, elem1->pad);
inner->size = elem1->size - elem1->pad;
}
}
struct malloc_elem *
malloc_elem_join_adjacent_free(struct malloc_elem *elem)
{
/*
* check if next element exists, is adjacent and is free, if so join
* with it, need to remove from free list.
*/
if (elem->next != NULL && elem->next->state == ELEM_FREE &&
next_elem_is_adjacent(elem)) {
void *erase;
size_t erase_len;
/* we will want to erase the trailer and header */
erase = RTE_PTR_SUB(elem->next, MALLOC_ELEM_TRAILER_LEN);
erase_len = MALLOC_ELEM_OVERHEAD + elem->next->pad;
/* remove from free list, join to this one */
malloc_elem_free_list_remove(elem->next);
join_elem(elem, elem->next);
/* erase header, trailer and pad */
memset(erase, MALLOC_POISON, erase_len);
}
/*
* check if prev element exists, is adjacent and is free, if so join
* with it, need to remove from free list.
*/
if (elem->prev != NULL && elem->prev->state == ELEM_FREE &&
prev_elem_is_adjacent(elem)) {
struct malloc_elem *new_elem;
void *erase;
size_t erase_len;
/* we will want to erase trailer and header */
erase = RTE_PTR_SUB(elem, MALLOC_ELEM_TRAILER_LEN);
erase_len = MALLOC_ELEM_OVERHEAD + elem->pad;
/* remove from free list, join to this one */
malloc_elem_free_list_remove(elem->prev);
new_elem = elem->prev;
join_elem(new_elem, elem);
/* erase header, trailer and pad */
memset(erase, MALLOC_POISON, erase_len);
elem = new_elem;
}
return elem;
}
/*
* free a malloc_elem block by adding it to the free list. If the
* blocks either immediately before or immediately after newly freed block
* are also free, the blocks are merged together.
*/
struct malloc_elem *
malloc_elem_free(struct malloc_elem *elem)
{
void *ptr;
size_t data_len;
ptr = RTE_PTR_ADD(elem, MALLOC_ELEM_HEADER_LEN);
data_len = elem->size - MALLOC_ELEM_OVERHEAD;
/*
* Consider the element clean for the purposes of joining.
* If both neighbors are clean or non-existent,
* the joint element will be clean,
* which means the memory should be cleared.
* There is no need to clear the memory if the joint element is dirty.
*/
elem->dirty = false;
elem = malloc_elem_join_adjacent_free(elem);
malloc_elem_free_list_insert(elem);
elem->pad = 0;
/* decrease heap's count of allocated elements */
elem->heap->alloc_count--;
#ifndef RTE_MALLOC_DEBUG
/* Normally clear the memory when needed. */
if (!elem->dirty)
memset(ptr, 0, data_len);
#else
/* Always poison the memory in debug mode. */
memset(ptr, MALLOC_POISON, data_len);
#endif
return elem;
}
/* assume all checks were already done */
void
malloc_elem_hide_region(struct malloc_elem *elem, void *start, size_t len)
{
struct malloc_elem *hide_start, *hide_end, *prev, *next;
size_t len_before, len_after;
hide_start = start;
hide_end = RTE_PTR_ADD(start, len);
prev = elem->prev;
next = elem->next;
/* we cannot do anything with non-adjacent elements */
if (next && next_elem_is_adjacent(elem)) {
len_after = RTE_PTR_DIFF(next, hide_end);
if (len_after >= MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE) {
asan_clear_split_alloczone(hide_end);
/* split after */
split_elem(elem, hide_end);
malloc_elem_free_list_insert(hide_end);
} else if (len_after > 0) {
RTE_LOG(ERR, EAL, "Unaligned element, heap is probably corrupt\n");
return;
}
}
/* we cannot do anything with non-adjacent elements */
if (prev && prev_elem_is_adjacent(elem)) {
len_before = RTE_PTR_DIFF(hide_start, elem);
if (len_before >= MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE) {
asan_clear_split_alloczone(hide_start);
/* split before */
split_elem(elem, hide_start);
prev = elem;
elem = hide_start;
malloc_elem_free_list_insert(prev);
} else if (len_before > 0) {
RTE_LOG(ERR, EAL, "Unaligned element, heap is probably corrupt\n");
return;
}
}
asan_clear_alloczone(elem);
remove_elem(elem);
}
/*
* attempt to resize a malloc_elem by expanding into any free space
* immediately after it in memory.
*/
int
malloc_elem_resize(struct malloc_elem *elem, size_t size)
{
const size_t new_size = size + elem->pad + MALLOC_ELEM_OVERHEAD;
/* if we request a smaller size, then always return ok */
if (elem->size >= new_size) {
asan_clear_alloczone(elem);
return 0;
}
/* check if there is a next element, it's free and adjacent */
if (!elem->next || elem->next->state != ELEM_FREE ||
!next_elem_is_adjacent(elem))
return -1;
if (elem->size + elem->next->size < new_size)
return -1;
/* we now know the element fits, so remove from free list,
* join the two
*/
malloc_elem_free_list_remove(elem->next);
join_elem(elem, elem->next);
if (elem->size - new_size >= MIN_DATA_SIZE + MALLOC_ELEM_OVERHEAD) {
/* now we have a big block together. Lets cut it down a bit, by splitting */
struct malloc_elem *split_pt = RTE_PTR_ADD(elem, new_size);
split_pt = RTE_PTR_ALIGN_CEIL(split_pt, RTE_CACHE_LINE_SIZE);
asan_clear_split_alloczone(split_pt);
split_elem(elem, split_pt);
malloc_elem_free_list_insert(split_pt);
}
asan_clear_alloczone(elem);
return 0;
}
static inline const char *
elem_state_to_str(enum elem_state state)
{
switch (state) {
case ELEM_PAD:
return "PAD";
case ELEM_BUSY:
return "BUSY";
case ELEM_FREE:
return "FREE";
}
return "ERROR";
}
void
malloc_elem_dump(const struct malloc_elem *elem, FILE *f)
{
fprintf(f, "Malloc element at %p (%s)\n", elem,
elem_state_to_str(elem->state));
fprintf(f, " len: 0x%zx pad: 0x%" PRIx32 "\n", elem->size, elem->pad);
fprintf(f, " prev: %p next: %p\n", elem->prev, elem->next);
}
|
#include<bits/stdc++.h>
using namespace std;
typedef long long ll;
#define fr(i,k,n) for(ll i = k;i<n;i++)
#define mo map<ll,ll>
//map<ll,ll> ms;
vector<ll> v;
ll arr[2000000 + 1];
ll pos[2000000 + 1];
ll neg[2000000 + 1];
int main(){
ios_base::sync_with_stdio(false);
cin.tie(NULL);
set<ll> se;
ll n;
cin>>n;
fr(i,1,n+1){
cin>>arr[i];
}
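// pos[i]/neg[i] = number of contiguous subarrays ending at index i whose product
// is positive/negative (input assumed non-zero); a positive arr[i] extends the
// counts, a negative arr[i] swaps them.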
fr(i,1,n+1){
if(arr[i]>0){
pos[i] = 1 + pos[i-1];
neg[i] = neg[i-1];
}
if(arr[i]<0){
neg[i] = 1 + pos[i-1];
pos[i] = neg[i-1];
}
}
ll p = 0;
ll ns = 0;
fr(i,1,n+1){
p += pos[i];
ns += neg[i];
}
cout<<ns<<" "<<p<<"\n";
} |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for
// license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
package com.azure.quantum.jobs.implementation;
import com.azure.core.http.HttpPipeline;
import com.azure.core.http.HttpPipelineBuilder;
import com.azure.core.http.policy.CookiePolicy;
import com.azure.core.http.policy.RetryPolicy;
import com.azure.core.http.policy.UserAgentPolicy;
import com.azure.core.util.serializer.JacksonAdapter;
import com.azure.core.util.serializer.SerializerAdapter;
/** Initializes a new instance of the QuantumClient type. */
public final class QuantumClientImpl {
/** The Azure subscription ID. This is a GUID-formatted string (e.g. 00000000-0000-0000-0000-000000000000). */
private final String subscriptionId;
/**
* Gets The Azure subscription ID. This is a GUID-formatted string (e.g. 00000000-0000-0000-0000-000000000000).
*
* @return the subscriptionId value.
*/
public String getSubscriptionId() {
return this.subscriptionId;
}
/** Name of an Azure resource group. */
private final String resourceGroupName;
/**
* Gets Name of an Azure resource group.
*
* @return the resourceGroupName value.
*/
public String getResourceGroupName() {
return this.resourceGroupName;
}
/** Name of the workspace. */
private final String workspaceName;
/**
* Gets Name of the workspace.
*
* @return the workspaceName value.
*/
public String getWorkspaceName() {
return this.workspaceName;
}
/** server parameter. */
private final String host;
/**
* Gets server parameter.
*
* @return the host value.
*/
public String getHost() {
return this.host;
}
/** The HTTP pipeline to send requests through. */
private final HttpPipeline httpPipeline;
/**
* Gets The HTTP pipeline to send requests through.
*
* @return the httpPipeline value.
*/
public HttpPipeline getHttpPipeline() {
return this.httpPipeline;
}
/** The serializer to serialize an object into a string. */
private final SerializerAdapter serializerAdapter;
/**
* Gets The serializer to serialize an object into a string.
*
* @return the serializerAdapter value.
*/
public SerializerAdapter getSerializerAdapter() {
return this.serializerAdapter;
}
/** The JobsImpl object to access its operations. */
private final JobsImpl jobs;
/**
* Gets the JobsImpl object to access its operations.
*
* @return the JobsImpl object.
*/
public JobsImpl getJobs() {
return this.jobs;
}
/** The ProvidersImpl object to access its operations. */
private final ProvidersImpl providers;
/**
* Gets the ProvidersImpl object to access its operations.
*
* @return the ProvidersImpl object.
*/
public ProvidersImpl getProviders() {
return this.providers;
}
/** The StoragesImpl object to access its operations. */
private final StoragesImpl storages;
/**
* Gets the StoragesImpl object to access its operations.
*
* @return the StoragesImpl object.
*/
public StoragesImpl getStorages() {
return this.storages;
}
/** The QuotasImpl object to access its operations. */
private final QuotasImpl quotas;
/**
* Gets the QuotasImpl object to access its operations.
*
* @return the QuotasImpl object.
*/
public QuotasImpl getQuotas() {
return this.quotas;
}
/**
* Initializes an instance of QuantumClient client.
*
* @param subscriptionId The Azure subscription ID. This is a GUID-formatted string (e.g.
* 00000000-0000-0000-0000-000000000000).
* @param resourceGroupName Name of an Azure resource group.
* @param workspaceName Name of the workspace.
* @param host server parameter.
*/
public QuantumClientImpl(String subscriptionId, String resourceGroupName, String workspaceName, String host) {
this(
new HttpPipelineBuilder()
.policies(new UserAgentPolicy(), new RetryPolicy(), new CookiePolicy())
.build(),
JacksonAdapter.createDefaultSerializerAdapter(),
subscriptionId,
resourceGroupName,
workspaceName,
host);
}
/**
* Initializes an instance of QuantumClient client.
*
* @param httpPipeline The HTTP pipeline to send requests through.
* @param subscriptionId The Azure subscription ID. This is a GUID-formatted string (e.g.
* 00000000-0000-0000-0000-000000000000).
* @param resourceGroupName Name of an Azure resource group.
* @param workspaceName Name of the workspace.
* @param host server parameter.
*/
public QuantumClientImpl(
HttpPipeline httpPipeline,
String subscriptionId,
String resourceGroupName,
String workspaceName,
String host) {
this(
httpPipeline,
JacksonAdapter.createDefaultSerializerAdapter(),
subscriptionId,
resourceGroupName,
workspaceName,
host);
}
/**
* Initializes an instance of QuantumClient client.
*
* @param httpPipeline The HTTP pipeline to send requests through.
* @param serializerAdapter The serializer to serialize an object into a string.
* @param subscriptionId The Azure subscription ID. This is a GUID-formatted string (e.g.
* 00000000-0000-0000-0000-000000000000).
* @param resourceGroupName Name of an Azure resource group.
* @param workspaceName Name of the workspace.
* @param host server parameter.
*/
public QuantumClientImpl(
HttpPipeline httpPipeline,
SerializerAdapter serializerAdapter,
String subscriptionId,
String resourceGroupName,
String workspaceName,
String host) {
this.httpPipeline = httpPipeline;
this.serializerAdapter = serializerAdapter;
this.subscriptionId = subscriptionId;
this.resourceGroupName = resourceGroupName;
this.workspaceName = workspaceName;
this.host = host;
this.jobs = new JobsImpl(this);
this.providers = new ProvidersImpl(this);
this.storages = new StoragesImpl(this);
this.quotas = new QuotasImpl(this);
}
}
|
/// Returns an iterator over response statuses, sorted lexicographically by
/// message id.
pub fn statuses(&self) -> impl Iterator<Item = (&MessageId, &IngressStatus)> {
self.statuses
.iter()
.map(|(id, status)| (id, status.as_ref()))
} |
#!/usr/bin/env python
import re
import sys
import random as R
inc = None
over = None
count = 50
frame = [0,0,300,200]
if len(sys.argv) > 1:
frame = map(int,sys.argv[1].split(','))
if len(sys.argv) > 2:
try:
count = int(sys.argv[2])
except ValueError:
m = re.match(r'^(\d+)/(\d+)$',sys.argv[2])
inc = int(m.group(1))
over = int(m.group(2))
def rand(a,b,count=2):
n = R.randint(a,b)
if count <= 1:
return (n % (b-a)) + a
return (n + rand(a,b,count-1)) % (b-a)
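# Two output modes: an "inc/over" second argument emits a full x,y,z grid with
# z = max(x, y) % over; a plain integer emits that many random x,y points inside the frame.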
if inc and over:
print "x,y,z"
for y in xrange(frame[1],frame[3]):
v = y * inc
for x in xrange(frame[0],frame[2]):
v = max(x,y) % over
print ','.join(map(str,(x,y,v)))
v += inc
else:
print "x,y"
print "\n".join(
(str(x)+','+str(y) for x,y in
((rand(frame[0],frame[2]),rand(frame[1],frame[3]))
for i in xrange(count)))
)
|
a = int(input())
b = int(input())
c = int(input())
nums = [a, b, c]
sums = []
# every way to combine a, b, c (in order) with + and *, including a*b+c
sums.append(nums[0]+nums[1]*nums[2])
sums.append(nums[0]*(nums[1]+nums[2]))
sums.append(nums[0]*nums[1]*nums[2])
sums.append((nums[0]+nums[1])*nums[2])
sums.append(nums[0]*nums[1]+nums[2])
sums.append(nums[0]+nums[1]+nums[2])
print(max(sums))
|
/**
* Class {@link Builder} follows the builder pattern for {@link GitHubRequest}.
*
* @param <B>
* The type of {@link Builder} to return from the various "with*" methods.
*/
static class Builder<B extends Builder<B>> {
@Nonnull
private final List<Entry> args;
/**
* The header values for this request.
*/
@Nonnull
private final Map<String, String> headers;
/**
* Injected local data map
*/
@Nonnull
private final Map<String, Object> injectedMappingValues;
/**
* The base GitHub API for this request.
*/
@Nonnull
private String apiUrl;
@Nonnull
private String urlPath;
/**
* Request method.
*/
@Nonnull
private String method;
@Nonnull
private RateLimitTarget rateLimitTarget;
private InputStream body;
private boolean forceBody;
/**
* Create a new {@link GitHubRequest.Builder}
*/
protected Builder() {
this(new ArrayList<>(),
new LinkedHashMap<>(),
new LinkedHashMap<>(),
GitHubClient.GITHUB_URL,
"/",
"GET",
RateLimitTarget.CORE,
null,
false);
}
private Builder(@Nonnull List<Entry> args,
@Nonnull Map<String, String> headers,
@Nonnull Map<String, Object> injectedMappingValues,
@Nonnull String apiUrl,
@Nonnull String urlPath,
@Nonnull String method,
@Nonnull RateLimitTarget rateLimitTarget,
@CheckForNull @WillClose InputStream body,
boolean forceBody) {
this.args = new ArrayList<>(args);
this.headers = new LinkedHashMap<>(headers);
this.injectedMappingValues = new LinkedHashMap<>(injectedMappingValues);
this.apiUrl = apiUrl;
this.urlPath = urlPath;
this.method = method;
this.rateLimitTarget = rateLimitTarget;
this.body = body;
this.forceBody = forceBody;
}
/**
* Builds a {@link GitHubRequest} from this builder.
*
* @return a {@link GitHubRequest}
* @throws MalformedURLException
* if the GitHub API URL cannot be constructed
*/
public GitHubRequest build() throws MalformedURLException {
return new GitHubRequest(args,
headers,
injectedMappingValues,
apiUrl,
urlPath,
method,
rateLimitTarget,
body,
forceBody);
}
/**
* Sets the base GitHub API URL for this request.
*
* @param url
* the url
* @return the request builder
*/
public B withApiUrl(String url) {
this.apiUrl = url;
return (B) this;
}
/**
* Sets the request HTTP header.
* <p>
* If a header of the same name is already set, this method overrides it.
*
* @param name
* the name
* @param value
* the value
* @return the request builder
*/
public B setHeader(String name, String value) {
headers.put(name, value);
return (B) this;
}
/**
* With header requester.
*
* @param name
* the name
* @param value
* the value
* @return the request builder
*/
public B withHeader(String name, String value) {
String oldValue = headers.get(name);
if (!StringUtils.isBlank(oldValue)) {
value = oldValue + ", " + value;
}
return setHeader(name, value);
}
/**
* Object to inject into binding.
*
* @param value
* the value
* @return the request builder
*/
public B injectMappingValue(@NonNull Object value) {
return injectMappingValue(value.getClass().getName(), value);
}
/**
* Object to inject into binding.
*
* @param name
* the name
* @param value
* the value
* @return the request builder
*/
public B injectMappingValue(@NonNull String name, Object value) {
this.injectedMappingValues.put(name, value);
return (B) this;
}
public B withPreview(String name) {
return withHeader("Accept", name);
}
public B withPreview(Previews preview) {
return withPreview(preview.mediaType());
}
/**
* With requester.
*
* @param map
* map of key value pairs to add
* @return the request builder
*/
public B with(Map<String, Object> map) {
for (Map.Entry<String, Object> entry : map.entrySet()) {
with(entry.getKey(), entry.getValue());
}
return (B) this;
}
/**
* With requester.
*
* @param key
* the key
* @param value
* the value
* @return the request builder
*/
public B with(String key, int value) {
return with(key, (Object) value);
}
/**
* With requester.
*
* @param key
* the key
* @param value
* the value
* @return the request builder
*/
public B with(String key, long value) {
return with(key, (Object) value);
}
/**
* With requester.
*
* @param key
* the key
* @param value
* the value
* @return the request builder
*/
public B with(String key, boolean value) {
return with(key, (Object) value);
}
/**
* With requester.
*
* @param key
* the key
* @param e
* the e
* @return the request builder
*/
public B with(String key, Enum<?> e) {
if (e == null)
return with(key, (Object) null);
return with(key, transformEnum(e));
}
/**
* With requester.
*
* @param key
* the key
* @param value
* the value
* @return the request builder
*/
public B with(String key, String value) {
return with(key, (Object) value);
}
/**
* With requester.
*
* @param key
* the key
* @param value
* the value
* @return the request builder
*/
public B with(String key, Collection<?> value) {
return with(key, (Object) value);
}
/**
* With requester.
*
* @param key
* the key
* @param value
* the value
* @return the request builder
*/
public B with(String key, Map<?, ?> value) {
return with(key, (Object) value);
}
/**
* With requester.
*
* @param body
* the body
* @return the request builder
*/
public B with(@WillClose /* later */ InputStream body) {
this.body = body;
return (B) this;
}
/**
* With nullable requester.
*
* @param key
* the key
* @param value
* the value
* @return the request builder
*/
public B withNullable(String key, Object value) {
args.add(new Entry(key, value));
return (B) this;
}
/**
* With requester.
*
* @param key
* the key
* @param value
* the value
* @return the request builder
*/
public B with(String key, Object value) {
if (value != null) {
args.add(new Entry(key, value));
}
return (B) this;
}
/**
* Unlike {@link #with(String, String)}, overrides the existing value
*
* @param key
* the key
* @param value
* the value
* @return the request builder
*/
public B set(String key, Object value) {
remove(key);
return with(key, value);
}
/**
* Removes all arg entries for a specific key.
*
* @param key
* the key
* @return the request builder
*/
public B remove(String key) {
for (int index = 0; index < args.size();) {
if (args.get(index).key.equals(key)) {
args.remove(index);
} else {
index++;
}
}
return (B) this;
}
/**
* Method requester.
*
* @param method
* the method
* @return the request builder
*/
public B method(@Nonnull String method) {
this.method = method;
return (B) this;
}
/**
* Method requester.
*
* @param rateLimitTarget
* the rate limit target for this request. Default is {@link RateLimitTarget#CORE}.
* @return the request builder
*/
public B rateLimit(@Nonnull RateLimitTarget rateLimitTarget) {
this.rateLimitTarget = rateLimitTarget;
return (B) this;
}
/**
* Content type requester.
*
* @param contentType
* the content type
* @return the request builder
*/
public B contentType(String contentType) {
this.headers.put("Content-type", contentType);
return (B) this;
}
/**
* NOT FOR PUBLIC USE. Do not make this method public.
* <p>
* Sets the path component of api URL without URI encoding.
* <p>
* Should only be used when passing a literal URL field from a GHObject, such as {@link GHContent#refresh()} or
* when needing to set query parameters on requests methods that don't usually have them, such as
* {@link GHRelease#uploadAsset(String, InputStream, String)}.
*
* @param rawUrlPath
* the content type
* @return the request builder
*/
B setRawUrlPath(@Nonnull String rawUrlPath) {
Objects.requireNonNull(rawUrlPath);
// This method should only work for full urls, which must start with "http"
if (!rawUrlPath.startsWith("http")) {
throw new GHException("Raw URL must start with 'http'");
}
this.urlPath = rawUrlPath;
return (B) this;
}
/**
* Path component of api URL. Appended to api url.
* <p>
* If urlPath starts with a slash, it will be URI encoded as a path. If it starts with anything else, it will be
* used as is.
*
* @param urlPathItems
* the content type
* @return the request builder
*/
public B withUrlPath(@Nonnull String urlPath, @Nonnull String... urlPathItems) {
// full url may be set and reset as needed
if (urlPathItems.length == 0 && !urlPath.startsWith("/")) {
return setRawUrlPath(urlPath);
}
// Once full url is set, do not allow path setting
if (!this.urlPath.startsWith("/")) {
throw new GHException("Cannot append to url path after setting a full url");
}
String tailUrlPath = urlPath;
if (urlPathItems.length != 0) {
tailUrlPath += "/" + String.join("/", urlPathItems);
}
tailUrlPath = StringUtils.prependIfMissing(tailUrlPath, "/");
this.urlPath = urlPathEncode(tailUrlPath);
return (B) this;
}
/**
* Small number of GitHub APIs use HTTP methods somewhat inconsistently, and use a body where it's not expected.
* Normally whether parameters go as query parameters or a body depends on the HTTP verb in use, but this method
* forces the parameters to be sent as a body.
*
* @return the request builder
*/
public B inBody() {
forceBody = true;
return (B) this;
}
} |
On the morning of April 24, 2013, at about 8:45 a.m., the Rana Plaza, a nine-story building housing factories and offices, collapsed in the Bangladeshi capital Dhaka. More than 3,500 people were in the building at the time, and 1,129 died in the wreckage. Mainuddin Khandaker, a senior official at the Ministry of Home Affairs, began his investigation that evening.
Six weeks later, Khandaker is sitting in a carved wooden armchair in his living room. A soft-spoken man in his early 60s who wears gold-rimmed glasses, he lives in Dhaka's government district. It's been dark outside for a while, and a single fluorescent tube, surrounded by fluttering moths, is the only light in the room. Khandaker is balancing a bowl of dark berries on his knees.
In the last few years, he has investigated more than 40 cases of factories that either collapsed or burnt down. But none of those accidents approached the scale of Rana Plaza, the biggest industrial accident in the country's history. The 493-page report Khandaker wrote contains witness statements, photos and structural engineering calculations. It will probably remain under lock and key. An investigative report on the textile industry has never been published in Bangladesh, he says.
Khandaker goes into the next room and returns with a stack of paper, the summary. He eats a berry from the bowl and carefully spits the seed onto the saucer. "Time pressure, lots of money, a lack of scruples and greed -- everything came together on that day," he says.
Dhaka, Basti Madjipur, April 24, 5:30 a.m.
The day began early in Basti Madjipur, a neighborhood in Dhaka's northwestern district of Savar.
A "basti" is a sprawling collection of simple concrete houses with sheet and corrugated metal roofs, separated by labyrinthine paths, where half-naked children play between puddles and ponds. There are chicken sheds everywhere, and the neighborhood is also home to a drove of small pigs. About 100,000 people live there. Everyone works in the nearby textile factories, and at the time, many were employed at the Rana Plaza building.
Mohammed Badul and his wife Shali got up at 5:30 a.m. that morning. Badul worked in the packaging department of a company called Phantom Apparels Ltd., in the Rana Plaza. His wife was a seamstress in another factory.
They live in a 12-square-meter (130-square-foot) room with a concrete floor, together with their nine-year-old son Sabbir. They own a bed, a dresser and dishes, some clothing and a TV set. A faucet and a shower are outside.
Like most mornings, Shali cooked rice with a little oil and a small amount of vegetables to bring to work as lunch for her and her husband. They didn't eat breakfast. At about 7 a.m., they left on foot for their respective factories. Shali had given her husband his lunch in a tin can. The one-hour lunch break began at 1 p.m.
The normal shift usually lasted until 8 p.m., but workers were often kept later for overtime, until 10 p.m. Shali went shopping on the way home, arriving at the house by around 11 p.m. Her son was already sleeping.
In 11 years, the couple has saved 20,000 taka, or €200 ($260). Mohammed Badul dreams of opening a barber shop one day.
Another couple, Fahima and her husband Abu Said also live in Basti Madjipur. Both are from the village of Bodergond in northern Bangladesh. They came to Dhaka six years ago, forced to leave their village and look for work so that they could repay their debts. Abu Said had borrowed 50,000 taka because he wanted to start his own company.
Together, they earn about 12,000 taka a month, of which they are able to save about 500 taka, or €5. They don't have a bed, or even a mattress. Abu Said indulges in only one luxury item: He occasionally buys a tin of chewing tobacco, which he always carries in his back pocket.
Fahima and Abu Said also have a son, five-year-old Shahin. A neighbor takes care of the boy when Fahima and her husband are at the factory. Their plan is to persevere. After a few years, they hope to return to their village with a little money in their pockets. Their goal in life is to create a future for Shahin.
Two sisters, Shefali and Shirin Akter, 20 and 18, live in a house on the same street. They came to Dhaka as children, left to their own devices, with nothing but two pairs of trousers and two shirts. Their little brother Nawshad joined them later. He is their hope, and they are working and saving money to pay for his education. They own four cups, a double bed, a TV set and two pieces of soap. They also own a basket with some clothing, three pairs of flip-flops, a pot, cutlery and some dishes.
Like most of the people who have come to this neighborhood, Shefali, Shirin and Nawshad live in one room, for which they pay a monthly rent of 2,000 taka. Many of these houses were built by the same man who was responsible for the construction of Rana Plaza: Sohel Rana, the godfather of Madjipur.
Rana is a short, puffy man in his mid-30s. He lives on Bazar Road, in a five-story house at the end of a path, with a metal gate to keep out intruders. Rana takes bodyguards with him when he leaves the house. He is a member of the youth organization of the ruling Awami Party, and recently had posters of himself hung up on walls in his basti. Some in the neighborhood already see Rana as a member of parliament.
But he had a problem on that morning. Cracks had appeared in the walls and load-bearing columns of the Rana Plaza building. The tenants, especially those pesky factory owners, were concerned.
Dhaka, Madjipur Bazar Road, Rana Plaza, 7:30 a.m.
A few hundred people had gathered in front of Rana Plaza. Mohammed Badul, the man who was saving for a barber shop, was there, and so were Fahima and Abu Said, the couple that doesn't even own a mattress. They were afraid. Their shift was to begin in half an hour -- that is, if it began at all.
Rana Plaza was taller than the surrounding buildings, with lower floors that were sided with a reflective blue glass material. Eight floors were already occupied, and construction had recently begun on a ninth. The words "Rana Plaza" were written in decorative letters above the entrance.
A branch of Brac Bank was on the second floor, and a sign indicating that the bank was closed had been hanging on the door since the day before. Not a good sign, Fahima thought to herself.
There were 10 million taka in the bank vault. The bank employees were in such a hurry to get to a safe place that they had left the money behind.
Sohel Rana was standing at the entrance, talking insistently to the factory owners. He told them that a few cracks were nothing to fear, and that someone would take care of the problem. On the previous day, managers had shut down all five textile factories and sent home some 3,500 people -- including Fahima, Mohammed Badul and the two sisters, Shefali and Shirin -- in the middle of the day. Then experts came and inspected the cracks.
Could the shift begin? Or was the building beyond repair? Many people were sent to their deaths because Rana was the person who ended up making that decision.
Born in Dhaka after his father moved there in the early 1980s, Rana grew up in the city, where he attended the Adhar Chandra High School. "Making money was the most important thing to him," says Khandaker, the investigator. To achieve his goal, Rana bought some land.
When he was mapping out his career, Dhaka was already a sprawling city. Hundreds of thousands migrated there from the countryside each year, and new factories were constantly being built. These workers needed housing, and the manufacturers needed production facilities.
Rana's father had property early on, and his son bought more. It was swampy land, which meant that it was cheap. Rana had the swamp filled with sand and garbage, and in 2007, he began the construction of a multistory building there. Rana Plaza was to be the beginning of a great career.
Normally, the approval of six government agencies is required to build a factory in Bangladesh: the Ministry of Industries, the Ministry of Labor, the Ministry of Home Affairs, the office of fire safety and civil protection, the office of the environment, the Board of Investment, and the Bangladesh Garment Manufacturers and Exporters Association (BGMEA). Rana circumvented these requirements by declaring the plaza as an office and retail building. He used his father's contacts and donated money to the campaign of a member of parliament to get it approved. That's how the Bangladeshi system works. A country growing as quickly as Bangladesh can't spend too much time on regulations. Once a building has been erected and is filled with machines and people working to bring money into the country, no one asks about permits.
Rana opened the building in 2008. It had been built hastily with thin subfloors, and the bricklayers had apparently never built such a tall building before. Too much sand was added to the concrete, and the ground was too soft -- all problems that investigator Khandaker would later discover. |
Practical Application of the New Classification Scheme for Congenital Melanocytic Nevi
A new consensus‐based classification of congenital melanocytic nevi (CMN) has recently been proposed. It includes categories for projected adult size (PAS) and location, satellite nevi counts, and morphologic characteristics (color heterogeneity, rugosity, nodularity, and hypertrichosis). The objective of the current study was to test the applicability of the new categorization scheme and to correlate classification outcome with the patient's history of melanoma and neurocutaneous melanocytosis (NCM). Children and adults with CMN attending a patient conference in Dallas, Texas, in 2012 were invited to participate in the study. Anamnestic data were collected using a standardized questionnaire. Two dermatologists performed clinical examinations. Of 45 patients enrolled, 33 had a giant CMN (G1, n = 13; G2, n = 20), 12 had NCM (5 symptomatic, 7 asymptomatic), and 1 had a history of melanoma. CMN size was positively correlated with NCM (p < 0.05). The classification system allowed an easy and detailed phenotypic characterization of each individual CMN. CMN size and morphology were difficult to assess in patients after surgical removal, and the number of satellite nevi at birth or during infancy was not always known. Our report provides practical aids for the application of the newly proposed CMN classification. Prospective evaluation of accurately classified patients in CMN registries will reveal the predictive value of the scheme. The small study sample limits meaningful conclusions regarding the correlation between CMN parameters and the risk of NCM and melanoma.
/**
* A Datapoint consumer implementation for a long datapoint.
*/
public class LongDataPointConsumer extends DataPointConsumerGeneric<Long>
implements IDataPointConsumer<Long> {
/**
* Constructor.
*
* @param remoteGroup Group identifier of the remote datapoint
* @param remoteClient Client identifier of the remote datapoint
* @param identifier Datapoint identifier
*/
public LongDataPointConsumer(String remoteGroup, String remoteClient, List<String> identifier) {
super(remoteGroup, remoteClient, identifier, new LongValue());
}
} |
import jax
import jax.numpy as jnp
import numpy as np


def gae(rewards: np.ndarray,
        values: np.ndarray,
        gamma: float,
        lam: float) -> jnp.ndarray:
    """Generalized Advantage Estimation.

    rewards has shape (T,); values has shape (T + 1,) and includes the
    bootstrap value for the state following the final step.
    """
    T, = rewards.shape
    rewards = jnp.asarray(rewards)
    values = jnp.asarray(values)
    v_next = values[1:]
    v_current = values[:-1]
    # One-step TD errors: delta_t = r_t + gamma * V(s_{t+1}) - V(s_t)
    delta = rewards + gamma * v_next - v_current
    t = lambda i: T - 1 - i  # walk the time steps in reverse

    def body(i, adv):
        # adv has length T + 1; adv[T] stays 0 and serves as the terminal value.
        return adv.at[t(i)].set(delta[t(i)] + (gamma * lam) * adv[t(i) + 1])

    adv = jax.lax.fori_loop(0, T, body, jnp.zeros(T + 1))
    return adv[:T] |
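A quick usage sketch for the function above; the reward and value numbers are made up purely for illustration, and values carries one extra entry because its last element is the bootstrap value for the state after the final step (the recursion is A_t = delta_t + gamma * lam * A_{t+1} with delta_t = r_t + gamma * V(s_{t+1}) - V(s_t)):

import numpy as np

rewards = np.array([1.0, 0.0, 1.0])        # T = 3 steps
values = np.array([0.5, 0.4, 0.3, 0.2])    # T + 1 entries, last one is the bootstrap value
advantages = gae(rewards, values, gamma=0.99, lam=0.95)
print(advantages.shape)  # (3,) |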
/*
* Copyright 2018 - 2021 Blazebit.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.blazebit.job.testsuite;
import com.blazebit.actor.ActorContext;
import com.blazebit.actor.spi.ClusterNodeInfo;
import com.blazebit.actor.spi.ClusterStateListener;
import com.blazebit.actor.spi.ClusterStateManager;
import com.blazebit.actor.spi.LockService;
import com.blazebit.actor.spi.StateReturningEvent;
import com.blazebit.job.JobContext;
import com.blazebit.job.JobInstanceProcessingContext;
import com.blazebit.job.JobInstanceState;
import com.blazebit.job.JobRateLimitException;
import com.blazebit.job.JobTemporaryException;
import com.blazebit.job.memory.model.JobConfiguration;
import com.blazebit.job.memory.model.TimeFrame;
import com.blazebit.job.spi.JobInstanceProcessorFactory;
import com.blazebit.job.spi.PartitionKeyProvider;
import com.blazebit.job.spi.TransactionSupport;
import org.junit.Test;
import java.io.Serializable;
import java.time.Clock;
import java.time.Instant;
import java.time.Year;
import java.time.ZoneOffset;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Consumer;
import java.util.function.Function;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
public class JobInstanceTest extends AbstractJobTest {
private BlockingQueue<Object> sink;
public JobInstanceTest() {
this.sink = new ArrayBlockingQueue<>(1024);
}
@Test
public void testJobInstanceSchedule() throws Exception {
// GIVEN
this.jobContext = builder()
.withJobInstanceProcessorFactory(JobInstanceProcessorFactory.of(((jobInstance, context) -> {
sink.add(jobInstance);
return null;
})))
.createContext();
// WHEN
jobContext.getJobManager().addJobInstance(new SimpleJobInstance());
// THEN
await();
jobContext.stop(1, TimeUnit.MINUTES);
assertEquals(1, sink.size());
}
@Test
public void testFailSchedulerJobInstance() throws Exception {
// GIVEN
this.jobContext = builder().createContext();
SimpleJobInstance jobInstance = new SimpleJobInstance() {
@Override
public JobConfiguration getJobConfiguration() {
throw new RuntimeException();
}
};
// WHEN
jobContext.getJobManager().addJobInstance(jobInstance);
// THEN
await();
jobContext.stop(1, TimeUnit.MINUTES);
assertEquals(JobInstanceState.FAILED, jobInstance.getState());
}
@Test
public void testFailJobInstance() throws Exception {
// GIVEN
this.jobContext = builder()
.withJobInstanceProcessorFactory(JobInstanceProcessorFactory.of(((jobInstance, context) -> {
throw new RuntimeException();
})))
.createContext();
SimpleJobInstance jobInstance = new SimpleJobInstance();
// WHEN
jobContext.getJobManager().addJobInstance(jobInstance);
// THEN
await();
jobContext.stop(1, TimeUnit.MINUTES);
assertEquals(JobInstanceState.FAILED, jobInstance.getState());
}
@Test
public void testFailRateLimitJobInstance() throws Exception {
// GIVEN
Clock clock = Clock.fixed(Instant.parse("2018-01-01T00:00:00.00Z"), ZoneOffset.UTC);
// We wait for 3x setScheduleTime invocations. Constructor, setter and via rate limiting
this.jobContext = builder(3).withService(Clock.class, clock)
.withJobInstanceProcessorFactory(JobInstanceProcessorFactory.of(((jobInstance, context) -> {
throw new JobRateLimitException(1000L);
})))
.createContext();
SimpleJobInstance jobInstance = new SimpleJobInstance() {
@Override
public void setScheduleTime(Instant scheduleTime) {
super.setScheduleTime(scheduleTime);
latch.countDown();
}
};
jobInstance.setCreationTime(clock.instant());
jobInstance.setScheduleTime(jobInstance.getCreationTime());
// WHEN
jobContext.getJobManager().addJobInstance(jobInstance);
// THEN
await();
jobContext.stop(1, TimeUnit.MINUTES);
assertEquals(JobInstanceState.NEW, jobInstance.getState());
assertEquals(Instant.parse("2018-01-01T00:00:00.00Z").plusSeconds(1L), jobInstance.getScheduleTime());
}
@Test
public void testFailTemporaryJobInstance() throws Exception {
// GIVEN
Clock clock = Clock.fixed(Instant.parse("2018-01-01T00:00:00.00Z"), ZoneOffset.UTC);
// We wait for 3x setScheduleTime invocations. Constructor, setter and via temporary exception handling
this.jobContext = builder(3).withService(Clock.class, clock)
.withJobInstanceProcessorFactory(JobInstanceProcessorFactory.of(((jobInstance, context) -> {
throw new JobTemporaryException(1000L);
})))
.createContext();
SimpleJobInstance jobInstance = new SimpleJobInstance() {
@Override
public void setScheduleTime(Instant scheduleTime) {
super.setScheduleTime(scheduleTime);
latch.countDown();
}
};
jobInstance.setCreationTime(clock.instant());
jobInstance.setScheduleTime(jobInstance.getCreationTime());
// WHEN
jobContext.getJobManager().addJobInstance(jobInstance);
// THEN
await();
jobContext.stop(1, TimeUnit.MINUTES);
assertEquals(JobInstanceState.NEW, jobInstance.getState());
assertEquals(Instant.parse("2018-01-01T00:00:00.00Z").plusSeconds(1L), jobInstance.getScheduleTime());
}
@Test
public void testFailJobInstanceTransaction() throws Exception {
// GIVEN
CountDownLatch txLatch = new CountDownLatch(1);
this.jobContext = builder()
.withTransactionSupport(new TransactionSupport() {
@Override
public <T> T transactional(JobContext context, long transactionTimeoutMillis, boolean joinIfPossible, Callable<T> callable, Consumer<Throwable> exceptionHandler) {
if (txLatch.getCount() == 0) {
try {
return callable.call();
} catch (Exception e) {
exceptionHandler.accept(e);
return null;
}
}
txLatch.countDown();
return null;
}
@Override
public void registerPostCommitListener(Runnable o) {
o.run();
}
})
.withJobInstanceProcessorFactory(JobInstanceProcessorFactory.of(((jobInstance, context) -> {
sink.add(jobInstance);
return null;
})))
.createContext();
// WHEN
jobContext.getJobManager().addJobInstance(new SimpleJobInstance());
// THEN
await();
jobContext.stop(1, TimeUnit.MINUTES);
assertEquals(1, sink.size());
assertEquals(0, txLatch.getCount());
}
@Test
public void testDeadline() throws Exception {
// GIVEN
this.jobContext = builder().createContext();
SimpleJobInstance jobInstance = new SimpleJobInstance();
jobInstance.getJobConfiguration().setDeadline(jobInstance.getCreationTime());
// WHEN
jobContext.getJobManager().addJobInstance(jobInstance);
// THEN
await();
jobContext.stop(1, TimeUnit.MINUTES);
assertEquals(JobInstanceState.DEADLINE_REACHED, jobInstance.getState());
}
@Test
public void testExecutionTimeFrames() throws Exception {
// GIVEN
this.jobContext = builder().createContext();
SimpleJobInstance jobInstance = new SimpleJobInstance();
TimeFrame timeFrame = new TimeFrame();
timeFrame.setEndYear(Year.of(2018));
jobInstance.getJobConfiguration().getExecutionTimeFrames().add(timeFrame);
// WHEN
jobContext.getJobManager().addJobInstance(jobInstance);
// THEN
await();
jobContext.stop(1, TimeUnit.MINUTES);
assertEquals(JobInstanceState.DROPPED, jobInstance.getState());
}
@Test(timeout = 5000L)
public void testDefer() throws Exception {
// GIVEN
Clock clock = Clock.fixed(Instant.parse("2018-01-01T00:00:00.00Z"), ZoneOffset.UTC);
this.jobContext = builder().withService(Clock.class, clock).createContext();
SimpleJobInstance jobInstance = new SimpleJobInstance() {
@Override
public void markDeferred(JobInstanceProcessingContext<?> jobProcessingContext, Instant newScheduleTime) {
super.markDeferred(jobProcessingContext, newScheduleTime);
latch.countDown();
}
};
jobInstance.setCreationTime(clock.instant());
jobInstance.setScheduleTime(jobInstance.getCreationTime());
TimeFrame timeFrame = new TimeFrame();
timeFrame.setStartYear(Year.of(2019));
jobInstance.getJobConfiguration().setMaximumDeferCount(1);
jobInstance.getJobConfiguration().getExecutionTimeFrames().add(timeFrame);
// WHEN
jobContext.getJobManager().addJobInstance(jobInstance);
// THEN
await();
jobContext.stop();
assertEquals(JobInstanceState.NEW, jobInstance.getState());
assertEquals(1, jobInstance.getDeferCount());
assertEquals(Instant.parse("2019-01-01T00:00:00.00Z"), jobInstance.getScheduleTime());
}
@Test(timeout = 5000L)
public void testDeferDrop() throws Exception {
// GIVEN
Clock clock = Clock.fixed(Instant.parse("2018-01-01T00:00:00.00Z"), ZoneOffset.UTC);
this.jobContext = builder().withService(Clock.class, clock).createContext();
SimpleJobInstance jobInstance = new SimpleJobInstance();
jobInstance.setCreationTime(clock.instant());
jobInstance.setScheduleTime(jobInstance.getCreationTime());
TimeFrame timeFrame = new TimeFrame();
timeFrame.setStartYear(Year.of(2019));
jobInstance.getJobConfiguration().setMaximumDeferCount(0);
jobInstance.getJobConfiguration().getExecutionTimeFrames().add(timeFrame);
// WHEN
jobContext.getJobManager().addJobInstance(jobInstance);
// THEN
await();
jobContext.stop();
assertEquals(JobInstanceState.DROPPED, jobInstance.getState());
assertEquals(1, jobInstance.getDeferCount());
assertEquals(Instant.parse("2019-01-01T00:00:00.00Z"), jobInstance.getScheduleTime());
}
@Test
public void testRefreshJobInstanceSchedulesSpecific() throws Exception {
// GIVEN
this.jobContext = builder().createContext();
SimpleJobInstance jobInstance = new SimpleJobInstance();
jobInstance.setState(JobInstanceState.DONE);
jobContext.getJobManager().addJobInstance(jobInstance);
// WHEN
jobInstance.setState(JobInstanceState.NEW);
jobContext.refreshJobInstanceSchedules(jobInstance);
// THEN
await();
jobContext.stop(1, TimeUnit.MINUTES);
assertEquals(JobInstanceState.DONE, jobInstance.getState());
}
@Test
public void testRefreshJobInstanceSchedulesRescan() throws Exception {
// GIVEN
this.jobContext = builder().createContext();
SimpleJobInstance jobInstance = new SimpleJobInstance();
jobInstance.setState(JobInstanceState.DONE);
jobContext.getJobManager().addJobInstance(jobInstance);
// WHEN
jobInstance.setState(JobInstanceState.NEW);
jobContext.refreshJobInstanceSchedules(0L);
// THEN
await();
jobContext.stop(1, TimeUnit.MINUTES);
assertEquals(JobInstanceState.DONE, jobInstance.getState());
}
@Test
public void testRefreshJobInstanceSchedulesGeneral() throws Exception {
// GIVEN
this.jobContext = builder().createContext();
SimpleJobInstance jobInstance = new SimpleJobInstance();
jobInstance.setState(JobInstanceState.DONE);
jobContext.getJobManager().addJobInstance(jobInstance);
// WHEN
jobInstance.setState(JobInstanceState.NEW);
jobContext.refreshJobInstanceSchedules(jobInstance.getScheduleTime().toEpochMilli());
// THEN
await();
jobContext.stop(1, TimeUnit.MINUTES);
assertEquals(JobInstanceState.DONE, jobInstance.getState());
}
@Test
public void testRefreshJobInstanceSchedulesPartition() throws Exception {
// GIVEN
JobContext.Builder builder = builder();
PartitionKeyProvider partitionKeyProvider = builder.getPartitionKeyProviderFactory().createPartitionKeyProvider(null, null);
this.jobContext = builder.createContext();
SimpleJobInstance jobInstance = new SimpleJobInstance();
jobInstance.setState(JobInstanceState.DONE);
jobContext.getJobManager().addJobInstance(jobInstance);
// WHEN
jobInstance.setState(JobInstanceState.NEW);
jobContext.refreshJobInstanceSchedules(partitionKeyProvider.getDefaultJobInstancePartitionKeys().iterator().next(), jobInstance.getScheduleTime().toEpochMilli());
// THEN
await();
jobContext.stop(1, TimeUnit.MINUTES);
assertEquals(JobInstanceState.DONE, jobInstance.getState());
}
@Test
public void testChunkingTest() throws Exception {
// GIVEN
this.jobContext = builder(2)
.withJobInstanceProcessorFactory(JobInstanceProcessorFactory.of(((jobInstance, context) -> {
sink.add(jobInstance);
return sink.size() == 1 ? true : null;
})))
.createContext();
SimpleJobInstance jobInstance = new SimpleJobInstance();
// WHEN
jobContext.getJobManager().addJobInstance(jobInstance);
// THEN
await();
jobContext.stop(1, TimeUnit.MINUTES);
assertEquals(JobInstanceState.DONE, jobInstance.getState());
assertEquals(2, sink.size());
}
@Test
public void testChangeClusterWhileIdle() throws Exception {
// GIVEN
MutableClusterStateManager clusterStateManager = new MutableClusterStateManager();
this.jobContext = builder().withActorContextBuilder(ActorContext.builder().withClusterStateManager(clusterStateManager)).createContext();
// WHEN
clusterStateManager.setClusterSize(2);
clusterStateManager.setClusterPosition(1);
clusterStateManager.fireClusterStateChanged();
SimpleJobInstance jobInstance = new SimpleJobInstance();
jobInstance.setState(JobInstanceState.NEW);
jobContext.getJobManager().addJobInstance(jobInstance);
// THEN
await();
jobContext.stop(1, TimeUnit.MINUTES);
assertEquals(JobInstanceState.DONE, jobInstance.getState());
}
@Test
public void testChangeClusterWhileIdleWorkStealing() throws Exception {
// GIVEN
MutableClusterStateManager clusterStateManager = new MutableClusterStateManager();
this.jobContext = builder().withActorContextBuilder(ActorContext.builder().withClusterStateManager(clusterStateManager)).createContext();
SimpleJobInstance jobInstance = new SimpleJobInstance();
jobInstance.setState(JobInstanceState.DONE);
jobContext.getJobManager().addJobInstance(jobInstance);
// WHEN
jobInstance.setState(JobInstanceState.NEW);
clusterStateManager.setClusterSize(2);
clusterStateManager.setClusterPosition(1);
clusterStateManager.fireClusterStateChanged();
// THEN
await();
jobContext.stop(1, TimeUnit.MINUTES);
assertEquals(JobInstanceState.DONE, jobInstance.getState());
}
@Test
public void testLongRunning() throws Exception {
// GIVEN
MutableClusterStateManager clusterStateManager = new MutableClusterStateManager();
CountDownLatch processorEnterLatch = new CountDownLatch(1);
CountDownLatch processorLatch = new CountDownLatch(1);
this.jobContext = builder()
.withJobInstanceProcessorFactory(JobInstanceProcessorFactory.of(((jobInstance, context) -> {
processorEnterLatch.countDown();
try {
processorLatch.await();
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
return null;
})))
.withActorContextBuilder(ActorContext.builder().withClusterStateManager(clusterStateManager)).createContext();
SimpleJobInstance jobInstance = new SimpleJobInstance(true);
jobInstance.setState(JobInstanceState.NEW);
jobContext.getJobManager().addJobInstance(jobInstance);
// WHEN
processorEnterLatch.await();
int clusterPosition = jobContext.getClusterPosition(jobInstance);
String trace = jobContext.getTrace(jobInstance);
jobContext.cancel(jobInstance);
// THEN
await();
jobContext.stop(1, TimeUnit.MINUTES);
assertEquals(JobInstanceState.FAILED, jobInstance.getState());
assertEquals(0, clusterPosition);
assertTrue("Unexpected trace:\n" + trace, trace.contains("java.util.concurrent.CountDownLatch"));
}
@Test
public void testLongRunningTakeOver() throws Exception {
// GIVEN
MutableClusterStateManager clusterStateManager = new MutableClusterStateManager();
CountDownLatch processorEnterLatch = new CountDownLatch(1);
CountDownLatch processorLatch = new CountDownLatch(1);
this.jobContext = builder()
.withJobInstanceProcessorFactory(JobInstanceProcessorFactory.of(((jobInstance, context) -> {
processorEnterLatch.countDown();
try {
processorLatch.await();
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
return null;
})))
.withActorContextBuilder(ActorContext.builder().withClusterStateManager(clusterStateManager)).createContext();
SimpleJobInstance jobInstance = new SimpleJobInstance(true);
jobInstance.setState(JobInstanceState.RUNNING);
jobContext.getJobManager().addJobInstance(jobInstance);
// WHEN
clusterStateManager.fireClusterStateChanged();
processorEnterLatch.await();
int clusterPosition = jobContext.getClusterPosition(jobInstance);
String trace = jobContext.getTrace(jobInstance);
jobContext.cancel(jobInstance);
// THEN
await();
jobContext.stop(1, TimeUnit.MINUTES);
assertEquals(JobInstanceState.FAILED, jobInstance.getState());
assertEquals(0, clusterPosition);
assertTrue("Unexpected trace:\n" + trace, trace.contains("java.util.concurrent.CountDownLatch"));
}
@Test
public void testLongRunningSkipAlreadyRunning() throws Exception {
// GIVEN
MutableClusterStateManager clusterStateManager = new MutableClusterStateManager();
CountDownLatch processorEnterLatch = new CountDownLatch(1);
this.jobContext = builder()
.withJobInstanceProcessorFactory(JobInstanceProcessorFactory.of(((jobInstance, context) -> {
processorEnterLatch.countDown();
return null;
})))
.withActorContextBuilder(ActorContext.builder().withClusterStateManager(clusterStateManager)).createContext();
SimpleJobInstance jobInstance = new SimpleJobInstance(true);
jobInstance.setState(JobInstanceState.RUNNING);
jobContext.getJobManager().addJobInstance(jobInstance);
// WHEN
clusterStateManager.eventFunction = event -> Collections.singletonMap(null, new SimpleFuture<>(new int[]{ 1 }));
clusterStateManager.fireClusterStateChanged();
// THEN
jobContext.stop(1, TimeUnit.MINUTES);
assertEquals(JobInstanceState.RUNNING, jobInstance.getState());
assertEquals(1, processorEnterLatch.getCount());
}
private static class MutableClusterStateManager implements ClusterStateManager, ClusterNodeInfo, LockService {
private final List<ClusterStateListener> clusterStateListeners = new CopyOnWriteArrayList<>();
private final ConcurrentMap<String, Lock> locks = new ConcurrentHashMap<>();
private final Map<Class<?>, List<Consumer<Serializable>>> listeners = new ConcurrentHashMap<>();
private Function<StateReturningEvent<?>, Map<ClusterNodeInfo, Future<?>>> eventFunction = e -> Collections.emptyMap();
private boolean isCoordinator = true;
private long clusterVersion = 0L;
private int clusterPosition = 0;
private int clusterSize = 1;
public void fireClusterStateChanged() {
clusterStateListeners.forEach(l -> l.onClusterStateChanged(this));
}
@Override
public ClusterNodeInfo getCurrentNodeInfo() {
return this;
}
@Override
public void registerListener(ClusterStateListener listener) {
clusterStateListeners.add(listener);
listener.onClusterStateChanged(this);
}
@Override
public <T extends Serializable> void registerListener(Class<T> eventClass, java.util.function.Consumer<T> listener) {
listeners.computeIfAbsent(eventClass, k -> new CopyOnWriteArrayList<>()).add((java.util.function.Consumer<Serializable>) listener);
}
@Override
public Lock getLock(String name) {
return locks.computeIfAbsent(name, k -> new ReentrantLock());
}
@Override
public LockService getLockService() {
return this;
}
@Override
public boolean isStandalone() {
return true;
}
@Override
public void fireEventExcludeSelf(Serializable event, boolean await) {
// Noop because there is no cluster
}
@Override
public void fireEvent(Serializable event, boolean await) {
java.util.function.Consumer<Class<?>> consumer = eventClass -> {
List<java.util.function.Consumer<Serializable>> consumers = listeners.get(eventClass);
if (consumers != null) {
consumers.forEach(c -> c.accept(event));
}
};
Class<?> clazz = event.getClass();
Set<Class<?>> visitedClasses = new HashSet<>();
do {
consumer.accept(clazz);
visitInterfaces(consumer, clazz, visitedClasses);
clazz = clazz.getSuperclass();
} while (clazz != null);
}
@Override
public <T> Map<ClusterNodeInfo, Future<T>> fireEvent(StateReturningEvent<T> event) {
fireEvent((Serializable) event, false);
T result = event.getResult();
return Collections.singletonMap(this, new Future<T>() {
@Override
public boolean cancel(boolean mayInterruptIfRunning) {
return false;
}
@Override
public boolean isCancelled() {
return false;
}
@Override
public boolean isDone() {
return true;
}
@Override
public T get() throws InterruptedException, ExecutionException {
return result;
}
@Override
public T get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException {
return result;
}
});
}
@Override
public <T> Map<ClusterNodeInfo, Future<T>> fireEventExcludeSelf(StateReturningEvent<T> event) {
return (Map) eventFunction.apply(event);
}
public Function<StateReturningEvent<?>, Map<ClusterNodeInfo, Future<?>>> getEventFunction() {
return eventFunction;
}
public void setEventFunction(Function<StateReturningEvent<?>, Map<ClusterNodeInfo, Future<?>>> eventFunction) {
this.eventFunction = eventFunction;
}
private void visitInterfaces(java.util.function.Consumer<Class<?>> consumer, Class<?> clazz, Set<Class<?>> visitedClasses) {
Class<?>[] interfaces = clazz.getInterfaces();
for (int i = 0; i < interfaces.length; i++) {
Class<?> interfaceClass = interfaces[i];
if (visitedClasses.add(interfaceClass)) {
consumer.accept(interfaceClass);
visitInterfaces(consumer, interfaceClass, visitedClasses);
}
}
}
@Override
public boolean isCoordinator() {
return isCoordinator;
}
public void setCoordinator(boolean coordinator) {
isCoordinator = coordinator;
}
@Override
public long getClusterVersion() {
return clusterVersion;
}
public void setClusterVersion(long clusterVersion) {
this.clusterVersion = clusterVersion;
}
@Override
public int getClusterPosition() {
return clusterPosition;
}
public void setClusterPosition(int clusterPosition) {
this.clusterPosition = clusterPosition;
}
@Override
public int getClusterSize() {
return clusterSize;
}
public void setClusterSize(int clusterSize) {
this.clusterSize = clusterSize;
}
}
private static class SimpleFuture<T> implements Future<T> {
private final T result;
public SimpleFuture(T result) {
this.result = result;
}
@Override
public boolean cancel(boolean mayInterruptIfRunning) {
return false;
}
@Override
public boolean isCancelled() {
return false;
}
@Override
public boolean isDone() {
return true;
}
@Override
public T get() throws InterruptedException, ExecutionException {
return result;
}
@Override
public T get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException {
return result;
}
}
}
|
COBIT 5.0: Capability Level of Information Technology Directorate General of Treasury
Information systems are among the most important tools a company can use to improve its performance. The Directorate General of the Treasury is a government agency that already uses information systems to support its work, handling the entire flow of state budget transactions and providing information systems to serve its users. Such a system must give priority to security and reliability. To ensure both, an information systems audit is needed to establish the capability level of its governance. The authors audited the information system using the COBIT 5.0 framework and measured it with the Capability Level model. EDM 01 (Ensure Governance Framework Setting and Maintenance) and EDM 02 (Ensure Benefits Delivery) are the two main processes the agency needs to audit, because it wants to be sure the system runs well and delivers its benefits to users. Based on the capability level measurements, both main processes stalled at level 4 and could not reach the target of level 5, because some activities do not run well and can inhibit other processes from reaching their goals. |
/**
* Entity for storing @{@link Localized} fields of an arbitrary entity.
*
* @author Victor Zhivotikov
* @since 0.1
*/
@Entity
@Table(uniqueConstraints = @UniqueConstraint(columnNames = {"tableName", "instance", "locale", "field"}))
@DynamicInsert
@DynamicUpdate
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor
public class LocalizedProperty implements Serializable {
private static final long serialVersionUID = -7994792168226645324L;
@Id
@GeneratedValue(strategy = GenerationType.IDENTITY)
private Long id;
private String tableName;
private String instance;
private Locale locale;
private String field;
private String value;
@Override
public String toString() {
return String.format("locale=%s, id=%s, %s.%s='%s'", locale, instance, tableName, field, value);
}
} |
package com.ibm.cdi.api;
import java.net.URI;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.UriBuilder;
import io.swagger.annotations.Api;
@Path("/")
@Api(hidden = true)
public class SwaggerService {
@GET
public Response swaggerRoot() {
URI uri = UriBuilder.fromUri("swagger/").build();
return Response.seeOther(uri).build();
}
}
|
// Upd updates node based on uid and changed fields, and unmarshals updated result into supplied obj
func (Simple) Upd(txn *ndgo.Txn, obj interface{}) (err error) {
if err = validateInput(obj); err != nil {
return err
}
dgType := getDgType(obj)
uid := updGetUIDSetUID(obj)
err = Stateless{}.Upd(txn, uid, dgType, obj)
if err != nil {
return err
}
return Stateless{}.GetByID(txn, uid, dgType, obj)
} |
package com.tmobile.pacbot.azure.inventory.collector;
import java.net.URLEncoder;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import com.google.gson.Gson;
import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
import com.microsoft.azure.PagedList;
import com.microsoft.azure.management.Azure;
import com.microsoft.azure.management.storage.StorageAccount;
import com.tmobile.pacbot.azure.inventory.auth.AzureCredentialProvider;
import com.tmobile.pacbot.azure.inventory.vo.BlobContainerVH;
import com.tmobile.pacbot.azure.inventory.vo.SubscriptionVH;
import com.tmobile.pacman.commons.utils.CommonUtils;
@Component
public class BlobContainerInventoryCollector {
@Autowired
AzureCredentialProvider azureCredentialProvider;
private String apiUrlTemplate = "https://management.azure.com/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Storage/storageAccounts/%s/blobServices/default/containers?api-version=2019-04-01";
private static Logger log = LoggerFactory.getLogger(BlobContainerInventoryCollector.class);
public List<BlobContainerVH> fetchBlobContainerDetails(SubscriptionVH subscription,Map<String, Map<String, String>> tagMap) {
List<BlobContainerVH> blobContainerList = new ArrayList<BlobContainerVH>();
String accessToken = azureCredentialProvider.getToken(subscription.getTenant());
Azure azure = azureCredentialProvider.getClient(subscription.getTenant(),subscription.getSubscriptionId());
PagedList<StorageAccount> storageAccounts = azure.storageAccounts().list();
for (StorageAccount storageAccount : storageAccounts) {
String url = String.format(apiUrlTemplate, URLEncoder.encode(subscription.getSubscriptionId()),
URLEncoder.encode(storageAccount.resourceGroupName()), URLEncoder.encode(storageAccount.name()));
try {
String response = CommonUtils.doHttpGet(url, "Bearer", accessToken);
JsonObject responseObj = new JsonParser().parse(response).getAsJsonObject();
JsonArray blobObjects = responseObj.getAsJsonArray("value");
for (JsonElement blobObjectElement : blobObjects) {
Map<String, String> tags= new HashMap<String, String>();
BlobContainerVH blobContainerVH = new BlobContainerVH();
blobContainerVH.setSubscription(subscription.getSubscriptionId());
blobContainerVH.setSubscriptionName(subscription.getSubscriptionName());
JsonObject blobObject = blobObjectElement.getAsJsonObject();
JsonObject properties = blobObject.getAsJsonObject("properties");
blobContainerVH.setId(blobObject.get("id").getAsString());
blobContainerVH.setName(blobObject.get("name").getAsString());
blobContainerVH.setType(blobObject.get("type").getAsString());
blobContainerVH.setTag(blobObject.get("etag").getAsString());
blobContainerVH.setTags(Util.tagsList(tagMap, storageAccount.resourceGroupName(), tags));
if (properties!=null) {
HashMap<String, Object> propertiesMap = new Gson().fromJson(properties.toString(),
HashMap.class);
blobContainerVH.setPropertiesMap(propertiesMap);
}
blobContainerList.add(blobContainerVH);
}
} catch (Exception e) {
log.error(" Error fetching blobcontainers for storage account {} Cause : {}" ,storageAccount.name(),e.getMessage());
}
}
log.info("Target Type : {} Total: {} ","Blob Container",blobContainerList.size());
return blobContainerList;
}
}
|
// Scintilla source code edit control
/** @file UniqueString.h
** Define UniqueString, a unique_ptr based string type for storage in containers
** and an allocator for UniqueString.
** Define UniqueStringSet which holds a set of strings, used to avoid holding many copies
** of font names.
**/
// Copyright 2017 by <NAME> <<EMAIL>>
// The License.txt file describes the conditions under which this software may be distributed.
#ifndef UNIQUESTRING_H
#define UNIQUESTRING_H

#include <memory>
#include <vector>
namespace Scintilla::Internal {
constexpr bool IsNullOrEmpty(const char *text) noexcept {
return text == nullptr || *text == '\0';
}
using UniqueString = std::unique_ptr<const char[]>;
/// Equivalent to strdup but produces a std::unique_ptr<const char[]> allocation to go
/// into collections.
UniqueString UniqueStringCopy(const char *text);
// A set of strings that always returns the same pointer for each string.
class UniqueStringSet {
private:
std::vector<UniqueString> strings;
public:
UniqueStringSet() noexcept;
// UniqueStringSet objects can not be copied.
UniqueStringSet(const UniqueStringSet &) = delete;
UniqueStringSet &operator=(const UniqueStringSet &) = delete;
// UniqueStringSet objects can be moved.
UniqueStringSet(UniqueStringSet &&) = default;
UniqueStringSet &operator=(UniqueStringSet &&) = default;
~UniqueStringSet();
void Clear() noexcept;
const char *Save(const char *text);
};
}
#endif
|
// OpenVPN -- An application to securely tunnel IP networks
// over a single port, with support for SSL/TLS-based
// session authentication and key exchange,
// packet encryption, packet authentication, and
// packet compression.
//
// Copyright (C) 2012-2017 OpenVPN Inc.
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License Version 3
// as published by the Free Software Foundation.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program in the COPYING file.
// If not, see <http://www.gnu.org/licenses/>.
// General string-splitting methods. These methods along with lexical analyzer
// classes (such as those defined in lex.hpp and OptionList::LexComment) can be
// used as a basis for parsers.
#ifndef OPENVPN_COMMON_SPLIT_H
#define OPENVPN_COMMON_SPLIT_H
#include <string>
#include <vector>
#include <utility>
#include <openvpn/common/size.hpp>
#include <openvpn/common/lex.hpp>
namespace openvpn {
namespace Split {
enum {
TRIM_LEADING_SPACES=(1<<0),
TRIM_SPECIAL=(1<<1), // trims quotes (but respects their content)
};
struct NullLimit
{
void add_term() {}
};
// Split a string using a character (such as ',') as a separator.
// Types:
// V : string vector of return data
// LEX : lexical analyzer class such as StandardLex
// LIM : limit class such as OptionList::Limits
// Args:
// ret : return data -- a list of strings
// input : input string to be split
// split_by : separator
// flags : TRIM_LEADING_SPACES, TRIM_SPECIAL
// max_terms : the size of the returned string list will be, at most, this value + 1. Pass
// ~0 to disable.
// lim : an optional limits object such as OptionList::Limits
template <typename V, typename LEX, typename LIM>
inline void by_char_void(V& ret, const std::string& input, const char split_by, const unsigned int flags=0, const unsigned int max_terms=~0, LIM* lim=nullptr)
{
LEX lex;
unsigned int nterms = 0;
std::string term;
for (std::string::const_iterator i = input.begin(); i != input.end(); ++i)
{
const char c = *i;
lex.put(c);
if (!lex.in_quote() && c == split_by && nterms < max_terms)
{
if (lim)
lim->add_term();
ret.push_back(std::move(term));
++nterms;
term = "";
}
else if ((!(flags & TRIM_SPECIAL) || lex.available())
&& (!(flags & TRIM_LEADING_SPACES) || !term.empty() || !SpaceMatch::is_space(c)))
term += c;
}
if (lim)
lim->add_term();
ret.push_back(std::move(term));
}
// convenience method that returns data rather than modifying an in-place argument
template <typename V, typename LEX, typename LIM>
inline V by_char(const std::string& input, const char split_by, const unsigned int flags=0, const unsigned int max_terms=~0, LIM* lim=nullptr)
{
V ret;
by_char_void<V, LEX, LIM>(ret, input, split_by, flags, max_terms, lim);
return ret;
}
// Split a string using spaces as a separator.
// Types:
// V : string vector of return data
// LEX : lexical analyzer class such as StandardLex
// SPACE : class that we use to differentiate between space and non-space chars
// LIM : limit class such as OptionList::Limits
// Args:
// ret : return data -- a list of strings
// input : input string to be split
// lim : an optional limits object such as OptionList::Limits
template <typename V, typename LEX, typename SPACE, typename LIM>
inline void by_space_void(V& ret, const std::string& input, LIM* lim=nullptr)
{
LEX lex;
std::string term;
bool defined = false;
for (std::string::const_iterator i = input.begin(); i != input.end(); ++i)
{
const char c = *i;
lex.put(c);
if (lex.in_quote())
defined = true;
if (lex.available())
{
const char tc = lex.get();
if (!SPACE::is_space(tc) || lex.in_quote())
{
defined = true;
term += tc;
}
else if (defined)
{
if (lim)
lim->add_term();
ret.push_back(std::move(term));
term = "";
defined = false;
}
}
}
if (defined)
{
if (lim)
lim->add_term();
ret.push_back(std::move(term));
}
}
// convenience method that returns data rather than modifying an in-place argument
template <typename V, typename LEX, typename SPACE, typename LIM>
inline V by_space(const std::string& input, LIM* lim=nullptr)
{
V ret;
by_space_void<V, LEX, SPACE, LIM>(ret, input, lim);
return ret;
}
}
} // namespace openvpn
#endif // OPENVPN_COMMON_SPLIT_H
|
def createMPlugAndUsdAttribute(self, sdfValueType, nodeName, stage, primPath):
plugAndAttrName = "my" + str(sdfValueType).replace('[]', 'Array')
cmds.group(name=nodeName, empty=True)
plug = mayaUsdLib.ReadUtil.FindOrCreateMayaAttr(
sdfValueType,
Sdf.VariabilityUniform,
nodeName,
plugAndAttrName)
self.assertEqual(cmds.attributeQuery(plugAndAttrName, n=nodeName, nn=True), plugAndAttrName)
p = stage.OverridePrim(primPath)
self.assertTrue(p)
attr = p.CreateAttribute(plugAndAttrName, sdfValueType)
return plug, attr |
package healthcheck
import (
"errors"
"fmt"
"github.com/gomodule/redigo/redis"
"time"
)
/*
checks:
- connection (Pool)
- closeTest connection
- ping
*/
func RedisCheck(url string) func() error {
return func() (err error) {
pool := &redis.Pool{
MaxIdle: 1,
IdleTimeout: 10 * time.Second,
Dial: func() (redis.Conn, error) { return redis.Dial("tcp", url)},
}
conn := pool.Get()
// The deferred closure reads and may overwrite the named return value err,
// so a failure while closing the connection is not silently lost.
defer func() {
	connErr := conn.Close()
	if err != nil && connErr != nil {
		err = fmt.Errorf("%s\ndb close error: %s", err.Error(), connErr.Error())
	} else if connErr != nil {
		err = errors.New("db close error: " + connErr.Error())
	}
}()
data, err := conn.Do("PING")
if err != nil {
return errors.New(fmt.Sprintf("%s: %s", "redis ping failed", err.Error()))
}
if data == nil {
return errors.New("empty response for redis ping")
}
if data != "PONG" {
return errors.New(fmt.Sprintf("%s: %s", "unexpected response for redis ping", data))
}
data, err = conn.Do("APPEND", "key", "value")
if err != nil {
	return errors.New(fmt.Sprintf("%s: %s", "redis append failed", err.Error()))
}
return nil
}
}
|
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.android.build.gradle.internal;
import com.android.Version;
import com.android.annotations.NonNull;
import com.android.annotations.Nullable;
import com.google.common.annotations.VisibleForTesting;
import java.io.IOException;
import java.io.InputStream;
import java.io.UnsupportedEncodingException;
import java.math.BigInteger;
import java.net.URLClassLoader;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.time.LocalDate;
import java.time.Period;
import java.time.format.DateTimeFormatter;
import java.util.jar.Manifest;
public final class NonFinalPluginExpiry {
/**
* default retirement age in days since its inception date for RC or beta versions.
*/
@VisibleForTesting static final Period DEFAULT_RETIREMENT_AGE_FOR_NON_RELEASE =
Period.ofDays(40);
private NonFinalPluginExpiry() {
}
/**
* Verify that this plugin execution is within its public time range.
*
* @throws RuntimeException if the plugin is a non final plugin older than 40 days.
*/
public static void verifyRetirementAge() {
// disable the time bomb for now.
if (true) return;
URLClassLoader cl = (URLClassLoader) NonFinalPluginExpiry.class.getClassLoader();
try (InputStream inputStream = cl.findResource("META-INF/MANIFEST.MF").openStream()) {
verifyRetirementAge(
LocalDate.now(),
new Manifest(inputStream),
System.getenv("ANDROID_DAILY_OVERRIDE"));
} catch (IOException ignore) {}
}
@VisibleForTesting
static void verifyRetirementAge(
@NonNull LocalDate now,
@NonNull Manifest manifest,
@Nullable String dailyOverride) {
String version = manifest.getMainAttributes().getValue("Plugin-Version");
Period retirementAge = getRetirementAge(version);
// if this plugin version will never be outdated, return.
if (retirementAge == null) {
return;
}
String inceptionDateAttr = manifest.getMainAttributes().getValue("Inception-Date");
// when running in unit tests, etc... the manifest entries are absent.
if (inceptionDateAttr == null) {
return;
}
LocalDate inceptionDate =
LocalDate.parse(inceptionDateAttr, DateTimeFormatter.ISO_LOCAL_DATE);
LocalDate expiryDate = inceptionDate.plus(retirementAge);
if (now.compareTo(expiryDate) > 0) {
// this plugin is too old.
final MessageDigest crypt;
try {
crypt = MessageDigest.getInstance("SHA-1");
} catch (NoSuchAlgorithmException e) {
return;
}
crypt.reset();
// encode the day, not the current time.
try {
crypt.update(
String.format(
"%1$s:%2$s:%3$s",
now.getYear(),
now.getMonthValue() -1,
now.getDayOfMonth())
.getBytes("utf8"));
} catch (UnsupportedEncodingException e) {
return;
}
String overrideValue = new BigInteger(1, crypt.digest()).toString(16);
if (dailyOverride == null) {
String message =
String.format(
"The android gradle plugin version %1$s is too old, "
+ "please update to the latest version.\n"
+ "\n"
+ "To override this check from the command line please "
+ "set the ANDROID_DAILY_OVERRIDE environment variable to "
+ "\"%2$s\"",
version,
overrideValue);
System.err.println(message);
throw new AndroidGradlePluginTooOldException(message);
} else {
// allow a version specific override.
String versionOverride =
String.valueOf(Version.ANDROID_GRADLE_PLUGIN_VERSION.hashCode());
if (dailyOverride.equals(overrideValue) || dailyOverride.equals(versionOverride)) {
return;
}
String message =
String.format(
"The android gradle plugin version %1$s is too old,"
+ "please update to the latest version.\n"
+ "\n"
+ "The ANDROID_DAILY_OVERRIDE value is outdated. "
+ "Please set the ANDROID_DAILY_OVERRIDE environment "
+ "variable to \"%2$s\"",
version, overrideValue);
System.err.println(message);
throw new AndroidGradlePluginTooOldException(message);
}
}
}
/**
* Returns the retirement age for this plugin depending on its version string, or null if this
* plugin version will never become obsolete
*
* @param version the plugin full version, like 1.3.4-preview5 or 1.0.2 or 1.2.3-beta4
*/
@Nullable
private static Period getRetirementAge(@Nullable String version) {
if (version == null
|| version.contains("rc")
|| version.contains("beta")
|| version.contains("alpha")
|| version.contains("preview")) {
return DEFAULT_RETIREMENT_AGE_FOR_NON_RELEASE;
}
return null;
}
public static final class AndroidGradlePluginTooOldException extends RuntimeException {
public AndroidGradlePluginTooOldException(@NonNull String message) {
super(message);
}
}
}
|
/**
* Critical user journeys with which to exercise the system, driven from the
* host.
*/
public class Cujs {
private Device mDevice;
public Cujs(Device device) {
this.mDevice = device;
}
/**
* Runs the critical user journeys.
*/
public void run() throws TestException {
// Do an explicit GC in the system server process as part of the test
// case to reduce GC-related sources of noise.
// SIGUSR1 = 10 is the magic signal to trigger the GC.
int pid = mDevice.getProcessPid("system_server");
mDevice.executeShellCommand("kill -10 " + pid);
// Invoke the Device Cujs instrumentation to run the cujs.
// TODO: Consider exercising the system in other interesting ways as
// well.
String command = "am instrument -w com.android.tests.sysmem.device/.Cujs";
mDevice.executeShellCommand(command);
}
} |
/**
* Grab a resource from the front of the resource list.
* Assumes: that the reslist is locked.
*/
static apr_res_t *pop_resource(apr_reslist_t *reslist)
{
apr_res_t *res;
res = APR_RING_FIRST(&reslist->avail_list);
APR_RING_REMOVE(res, link);
reslist->nidle--;
return res;
} |
Electromechanical Equipment for Integrated Use of Power Potential of Hydroelectric Power Plant Reservoirs
Using a newly developed expression for an energy efficiency criterion, the study establishes the energy feasibility of exploiting the thermal and mechanical potential of the water held in hydroelectric power plant reservoirs. The same criterion is then applied to determine the areas and the degree of application of combined electromechanical equipment, which avoids repeated interconversion between electrical and mechanical energy. |
package io.zero88.qwe;
import io.zero88.qwe.crypto.CryptoRequest;
import lombok.NonNull;
/**
* Represents for a {@code Plugin} that keeps a single duty function.
*
* @param <C> Type of Plugin Config
* @param <T> Type of Plugin Context
* @see PluginConfig
* @see PluginContext
* @see PluginVerticle
*/
public interface Plugin<C extends PluginConfig, T extends PluginContext>
extends QWEVerticle<C>, HasPluginName, HasConfigKey, CryptoRequest {
/**
* Expresses a functional that this plugin brings to. For example: {@code http-server}, {@code sql-mysql}
*
* @return the plugin function name
* @apiNote To better identify, {@code plugin name} convention is {@code kebab-case}
*/
@Override
default String pluginName() {
return this.getClass().getSimpleName();
}
/**
* Defines a particular deployment options for plugin.
* <p>
* If a {@code plugin} want to declare its deployment options, the config must be used this key and attach under
* {@link QWEAppConfig}
*
* @return a deployment key
*/
default String deploymentKey() {
return PluginConfig.PLUGIN_DEPLOY_CONFIG_KEY + configKey();
}
/**
* Plugin config
*
* @return a plugin config
*/
C pluginConfig();
/**
* Plugin context
*
* @return a plugin context
*/
@NonNull T pluginContext();
/**
* Declares a deployment hook that helps {@code Application} inject a general context information into {@code
* Plugin}
*
* @return a deployment hook
* @see PluginDeployHook
* @see Application#installPlugins()
*/
@NonNull PluginDeployHook deployHook();
/**
* Register a pre-plugin-context before installing {@code Plugin} and a post-plugin-context after installed {@code
* Plugin}
* <p>
* This method will be called 2 times automatically in application deployment workflow that thanks to {@link
* #deployHook()}
*
* @param context shared data key
* @return a reference to this, so the API can be used fluently
* @see Application
*/
Plugin<C, T> setup(T context);
@Override
default @NonNull SharedDataLocalProxy sharedData() {
return pluginContext().sharedData();
}
}
|
import Data.Char
myprocess :: [Char] -> [Char]
myprocess [] = []
myprocess (c:left) =
if c `elem` ['a','e','i','o','u','y']
then myprocess left
else ['.',c] ++ myprocess left
main = do
str <- getLine
putStrLn $ myprocess $ map toLower str
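The Haskell program above lowercases its input, drops vowels (including 'y'), and prefixes every remaining character with a dot. A minimal Python sketch of the same transformation, added here purely for illustration:

def process(s: str) -> str:
    # Drop vowels (including 'y') and prefix each remaining character with '.'.
    vowels = set("aeiouy")
    return "".join("." + c for c in s.lower() if c not in vowels)

if __name__ == "__main__":
    print(process(input()))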
|
def related_logentries(self, instance):
return LogEntry.objects.filter(
attached_to_object_type=ContentType.objects.get_for_model(type(instance)),
attached_to_object_id=instance.pk,
) |
# Read the space-separated tokens, sort them, and report whether they
# concatenate to "557".
A = input().split()
A.sort()
s = "".join(A)
YN = lambda b: print('YES') if b else print('NO')
YN(s == "557")
|
Winnipeg’s leading business organization says the city's residents are still waiting for the mayoral candidates to outline their vision for the city.
Loren Remillard, the Winnipeg Chamber of Commerce's vice president of policy and public affairs, said the mayoral candidates have talked non-stop about the changes they’ll make, but none of them has said what Winnipeg should become.
“As a city, we don’t know where we’re going and for what reasons. All (election promises) are is a collection of ideas that we’re hearing from candidates,” Remillard said following the chamber’s release Wednesday morning of a package of ideas on how to reform city hall.
Remillard said the most important issue the mayoral candidates need to address is their vision for the city. |
def entity_to_pb_v1(self, entity):
v3_entity = self.entity_to_pb(entity)
v1_entity = googledatastore.Entity()
self._entity_converter.v3_to_v1_entity(v3_entity, v1_entity)
return v1_entity |
/**
* An extensible {@link Concourse} wrapper that simply forwards method calls to
* another {@link Concourse} instance.
* <p>
* This class is meant to be extended by a subclass that needs to override a
* <strong>subset</strong> of methods to provide additional functionality before
* or after the Concourse method execution. For example, an extension of this
* class can be used to cache the return value of some Concourse methods and
* check that cache before performing a database call.
* </p>
*
* @author Jeff Nelson
*/
@SuppressWarnings("deprecation")
public abstract class ForwardingConcourse extends Concourse {
/**
* The instance to which method invocations are routed.
*/
private final Concourse concourse;
/**
* Construct a new instance.
*
* @param concourse
*/
public ForwardingConcourse(Concourse concourse) {
this.concourse = concourse;
}
@Override
public void abort() {
concourse.abort();
}
@Override
public <T> long add(String key, T value) {
return concourse.add(key, value);
}
@Override
public <T> Map<Long, Boolean> add(String key, T value,
Collection<Long> records) {
return concourse.add(key, value, records);
}
@Override
public <T> boolean add(String key, T value, long record) {
return concourse.add(key, value, record);
}
@Override
public Map<Timestamp, String> audit(long record) {
return concourse.audit(record);
}
@Override
public Map<Timestamp, String> audit(long record, Timestamp start) {
return concourse.audit(record, start);
}
@Override
public Map<Timestamp, String> audit(long record, Timestamp start,
Timestamp end) {
return concourse.audit(record, start, end);
}
@Override
public Map<Timestamp, String> audit(String key, long record) {
return concourse.audit(key, record);
}
@Override
public Map<Timestamp, String> audit(String key, long record,
Timestamp start) {
return concourse.audit(key, record, start);
}
@Override
public Map<Timestamp, String> audit(String key, long record,
Timestamp start, Timestamp end) {
return concourse.audit(key, record, start, end);
}
@Override
public Map<String, Map<Object, Set<Long>>> browse(Collection<String> keys) {
return concourse.browse(keys);
}
@Override
public Map<String, Map<Object, Set<Long>>> browse(Collection<String> keys,
Timestamp timestamp) {
return concourse.browse(keys, timestamp);
}
@Override
public Map<Object, Set<Long>> browse(String key) {
return concourse.browse(key);
}
@Override
public Map<Object, Set<Long>> browse(String key, Timestamp timestamp) {
return concourse.browse(key, timestamp);
}
@Override
public <T> Map<Timestamp, Set<T>> chronologize(String key, long record) {
return concourse.chronologize(key, record);
}
@Override
public <T> Map<Timestamp, Set<T>> chronologize(String key, long record,
Timestamp start) {
return concourse.chronologize(key, record, start);
}
@Override
public <T> Map<Timestamp, Set<T>> chronologize(String key, long record,
Timestamp start, Timestamp end) {
return concourse.chronologize(key, record, start, end);
}
@Override
public void clear(Collection<Long> records) {
concourse.clear(records);
}
@Override
public void clear(Collection<String> keys, Collection<Long> records) {
concourse.clear(keys, records);
}
@Override
public void clear(Collection<String> keys, long record) {
concourse.clear(keys, record);
}
@Override
public void clear(long record) {
concourse.clear(record);
}
@Override
public void clear(String key, Collection<Long> records) {
concourse.clear(key, records);
}
@Override
public void clear(String key, long record) {
concourse.clear(key, record);
}
@Override
public boolean commit() {
return concourse.commit();
}
@Override
public boolean consolidate(long first, long second, long... remaining) {
return concourse.consolidate(first, second, remaining);
}
@Override
public Set<String> describe() {
return concourse.describe();
}
@Override
public Map<Long, Set<String>> describe(Collection<Long> records) {
return concourse.describe(records);
}
@Override
public Map<Long, Set<String>> describe(Collection<Long> records,
Timestamp timestamp) {
return concourse.describe(records, timestamp);
}
@Override
public Set<String> describe(long record) {
return concourse.describe(record);
}
@Override
public Set<String> describe(long record, Timestamp timestamp) {
return concourse.describe(record, timestamp);
}
@Override
public Set<String> describe(Timestamp timestamp) {
return concourse.describe(timestamp);
}
@Override
public <T> Map<String, Map<Diff, Set<T>>> diff(long record,
Timestamp start) {
return concourse.diff(record, start);
}
@Override
public <T> Map<String, Map<Diff, Set<T>>> diff(long record, Timestamp start,
Timestamp end) {
return concourse.diff(record, start, end);
}
@Override
public <T> Map<Diff, Set<T>> diff(String key, long record,
Timestamp start) {
return concourse.diff(key, record, start);
}
@Override
public <T> Map<Diff, Set<T>> diff(String key, long record, Timestamp start,
Timestamp end) {
return concourse.diff(key, record, start, end);
}
@Override
public <T> Map<T, Map<Diff, Set<Long>>> diff(String key, Timestamp start) {
return concourse.diff(key, start);
}
@Override
public <T> Map<T, Map<Diff, Set<Long>>> diff(String key, Timestamp start,
Timestamp end) {
return concourse.diff(key, start, end);
}
@Override
public void exit() {
concourse.exit();
}
@Override
public Set<Long> find(Criteria criteria) {
return concourse.find(criteria);
}
@Override
public Set<Long> find(Criteria criteria, Order order) {
return concourse.find(criteria, order);
}
@Override
public Set<Long> find(Criteria criteria, Order order, Page page) {
return concourse.find(criteria, order, page);
}
@Override
public Set<Long> find(Criteria criteria, Page page) {
return concourse.find(criteria, page);
}
@Override
public Set<Long> find(String ccl) {
return concourse.find(ccl);
}
@Override
public Set<Long> find(String key, Object value) {
return concourse.find(key, value);
}
@Override
public Set<Long> find(String key, Object value, Order order) {
return concourse.find(key, value, order);
}
@Override
public Set<Long> find(String key, Object value, Order order, Page page) {
return concourse.find(key, value, order, page);
}
@Override
public Set<Long> find(String key, Object value, Page page) {
return concourse.find(key, value, page);
}
@Override
public Set<Long> find(String key, Object value, Timestamp timestamp) {
return concourse.find(key, value, timestamp);
}
@Override
public Set<Long> find(String key, Object value, Timestamp timestamp,
Order order) {
return concourse.find(key, value, timestamp, order);
}
@Override
public Set<Long> find(String key, Object value, Timestamp timestamp,
Order order, Page page) {
return concourse.find(key, value, timestamp, order, page);
}
@Override
public Set<Long> find(String key, Object value, Timestamp timestamp,
Page page) {
return concourse.find(key, value, timestamp, page);
}
@Override
public Set<Long> find(String key, Operator operator, Object value) {
return concourse.find(key, operator, value);
}
@Override
public Set<Long> find(String key, Operator operator, Object value,
Object value2) {
return concourse.find(key, operator, value, value2);
}
@Override
public Set<Long> find(String key, Operator operator, Object value,
Object value2, Order order) {
return concourse.find(key, operator, value, value2, order);
}
@Override
public Set<Long> find(String key, Operator operator, Object value,
Object value2, Order order, Page page) {
return concourse.find(key, operator, value, value2, order, page);
}
@Override
public Set<Long> find(String key, Operator operator, Object value,
Object value2, Page page) {
return concourse.find(key, operator, value, value2, page);
}
@Override
public Set<Long> find(String key, Operator operator, Object value,
Object value2, Timestamp timestamp) {
return concourse.find(key, operator, value, value2, timestamp);
}
@Override
public Set<Long> find(String key, Operator operator, Object value,
Object value2, Timestamp timestamp, Order order) {
return concourse.find(key, operator, value, value2, timestamp, order);
}
@Override
public Set<Long> find(String key, Operator operator, Object value,
Object value2, Timestamp timestamp, Order order, Page page) {
return concourse.find(key, operator, value, value2, timestamp, order,
page);
}
@Override
public Set<Long> find(String key, Operator operator, Object value,
Object value2, Timestamp timestamp, Page page) {
return concourse.find(key, operator, value, value2, timestamp, page);
}
@Override
public Set<Long> find(String key, Operator operator, Object value,
Order order) {
return concourse.find(key, operator, value, order);
}
@Override
public Set<Long> find(String key, Operator operator, Object value,
Order order, Page page) {
return concourse.find(key, operator, value, order, page);
}
@Override
public Set<Long> find(String key, Operator operator, Object value,
Page page) {
return concourse.find(key, operator, value, page);
}
@Override
public Set<Long> find(String key, Operator operator, Object value,
Timestamp timestamp) {
return concourse.find(key, operator, value, timestamp);
}
@Override
public Set<Long> find(String key, Operator operator, Object value,
Timestamp timestamp, Order order) {
return concourse.find(key, operator, value, timestamp, order);
}
@Override
public Set<Long> find(String key, Operator operator, Object value,
Timestamp timestamp, Order order, Page page) {
return concourse.find(key, operator, value, timestamp, order, page);
}
@Override
public Set<Long> find(String key, Operator operator, Object value,
Timestamp timestamp, Page page) {
return concourse.find(key, operator, value, timestamp, page);
}
@Override
public Set<Long> find(String ccl, Order order) {
return concourse.find(ccl, order);
}
@Override
public Set<Long> find(String ccl, Order order, Page page) {
return concourse.find(ccl, order, page);
}
@Override
public Set<Long> find(String ccl, Page page) {
return concourse.find(ccl, page);
}
@Override
public Set<Long> find(String key, String operator, Object value) {
return concourse.find(key, operator, value);
}
@Override
public Set<Long> find(String key, String operator, Object value,
Object value2) {
return concourse.find(key, operator, value, value2);
}
@Override
public Set<Long> find(String key, String operator, Object value,
Object value2, Order order) {
return concourse.find(key, operator, value, value2, order);
}
@Override
public Set<Long> find(String key, String operator, Object value,
Object value2, Order order, Page page) {
return concourse.find(key, operator, value, value2, order, page);
}
@Override
public Set<Long> find(String key, String operator, Object value,
Object value2, Page page) {
return concourse.find(key, operator, value, value2, page);
}
@Override
public Set<Long> find(String key, String operator, Object value,
Object value2, Timestamp timestamp) {
return concourse.find(key, operator, value, value2, timestamp);
}
@Override
public Set<Long> find(String key, String operator, Object value,
Object value2, Timestamp timestamp, Order order) {
return concourse.find(key, operator, value, value2, timestamp, order);
}
@Override
public Set<Long> find(String key, String operator, Object value,
Object value2, Timestamp timestamp, Order order, Page page) {
return concourse.find(key, operator, value, value2, timestamp, order,
page);
}
@Override
public Set<Long> find(String key, String operator, Object value,
Object value2, Timestamp timestamp, Page page) {
return concourse.find(key, operator, value, value2, timestamp, page);
}
@Override
public Set<Long> find(String key, String operator, Object value,
Order order) {
return concourse.find(key, operator, value, order);
}
@Override
public Set<Long> find(String key, String operator, Object value,
Order order, Page page) {
return concourse.find(key, operator, value, order, page);
}
@Override
public Set<Long> find(String key, String operator, Object value,
Page page) {
return concourse.find(key, operator, value, page);
}
@Override
public Set<Long> find(String key, String operator, Object value,
Timestamp timestamp) {
return concourse.find(key, operator, value, timestamp);
}
@Override
public Set<Long> find(String key, String operator, Object value,
Timestamp timestamp, Order order) {
return concourse.find(key, operator, value, timestamp, order);
}
@Override
public Set<Long> find(String key, String operator, Object value,
Timestamp timestamp, Order order, Page page) {
return concourse.find(key, operator, value, timestamp, order, page);
}
@Override
public Set<Long> find(String key, String operator, Object value,
Timestamp timestamp, Page page) {
return concourse.find(key, operator, value, timestamp, page);
}
@Override
public <T> long findOrAdd(String key, T value)
throws DuplicateEntryException {
return concourse.findOrAdd(key, value);
}
@Override
public long findOrInsert(Criteria criteria, String json)
throws DuplicateEntryException {
return concourse.findOrInsert(criteria, json);
}
@Override
public long findOrInsert(String ccl, String json)
throws DuplicateEntryException {
return concourse.findOrInsert(ccl, json);
}
@Override
public <T> Map<Long, Map<String, T>> get(Collection<String> keys,
Collection<Long> records) {
return concourse.get(keys, records);
}
@Override
public <T> Map<Long, Map<String, T>> get(Collection<String> keys,
Collection<Long> records, Order order) {
return concourse.get(keys, records, order);
}
@Override
public <T> Map<Long, Map<String, T>> get(Collection<String> keys,
Collection<Long> records, Order order, Page page) {
return concourse.get(keys, records, order, page);
}
@Override
public <T> Map<Long, Map<String, T>> get(Collection<String> keys,
Collection<Long> records, Page page) {
return concourse.get(keys, records, page);
}
@Override
public <T> Map<Long, Map<String, T>> get(Collection<String> keys,
Collection<Long> records, Timestamp timestamp) {
return concourse.get(keys, records, timestamp);
}
@Override
public <T> Map<Long, Map<String, T>> get(Collection<String> keys,
Collection<Long> records, Timestamp timestamp, Order order) {
return concourse.get(keys, records, timestamp, order);
}
@Override
public <T> Map<Long, Map<String, T>> get(Collection<String> keys,
Collection<Long> records, Timestamp timestamp, Order order,
Page page) {
return concourse.get(keys, records, timestamp, order, page);
}
@Override
public <T> Map<Long, Map<String, T>> get(Collection<String> keys,
Collection<Long> records, Timestamp timestamp, Page page) {
return concourse.get(keys, records, timestamp, page);
}
@Override
public <T> Map<Long, Map<String, T>> get(Collection<String> keys,
Criteria criteria) {
return concourse.get(keys, criteria);
}
@Override
public <T> Map<Long, Map<String, T>> get(Collection<String> keys,
Criteria criteria, Order order) {
return concourse.get(keys, criteria, order);
}
@Override
public <T> Map<Long, Map<String, T>> get(Collection<String> keys,
Criteria criteria, Order order, Page page) {
return concourse.get(keys, criteria, order, page);
}
@Override
public <T> Map<Long, Map<String, T>> get(Collection<String> keys,
Criteria criteria, Page page) {
return concourse.get(keys, criteria, page);
}
@Override
public <T> Map<Long, Map<String, T>> get(Collection<String> keys,
Criteria criteria, Timestamp timestamp) {
return concourse.get(keys, criteria, timestamp);
}
@Override
public <T> Map<Long, Map<String, T>> get(Collection<String> keys,
Criteria criteria, Timestamp timestamp, Order order) {
return concourse.get(keys, criteria, timestamp, order);
}
@Override
public <T> Map<Long, Map<String, T>> get(Collection<String> keys,
Criteria criteria, Timestamp timestamp, Order order, Page page) {
return concourse.get(keys, criteria, timestamp, order, page);
}
@Override
public <T> Map<Long, Map<String, T>> get(Collection<String> keys,
Criteria criteria, Timestamp timestamp, Page page) {
return concourse.get(keys, criteria, timestamp, page);
}
@Override
public <T> Map<String, T> get(Collection<String> keys, long record) {
return concourse.get(keys, record);
}
@Override
public <T> Map<String, T> get(Collection<String> keys, long record,
Timestamp timestamp) {
return concourse.get(keys, record, timestamp);
}
@Override
public <T> Map<Long, Map<String, T>> get(Collection<String> keys,
String ccl) {
return concourse.get(keys, ccl);
}
@Override
public <T> Map<Long, Map<String, T>> get(Collection<String> keys,
String ccl, Order order) {
return concourse.get(keys, ccl, order);
}
@Override
public <T> Map<Long, Map<String, T>> get(Collection<String> keys,
String ccl, Order order, Page page) {
return concourse.get(keys, ccl, order, page);
}
@Override
public <T> Map<Long, Map<String, T>> get(Collection<String> keys,
String ccl, Page page) {
return concourse.get(keys, ccl, page);
}
@Override
public <T> Map<Long, Map<String, T>> get(Collection<String> keys,
String ccl, Timestamp timestamp) {
return concourse.get(keys, ccl, timestamp);
}
@Override
public <T> Map<Long, Map<String, T>> get(Collection<String> keys,
String ccl, Timestamp timestamp, Order order) {
return concourse.get(keys, ccl, timestamp, order);
}
@Override
public <T> Map<Long, Map<String, T>> get(Collection<String> keys,
String ccl, Timestamp timestamp, Order order, Page page) {
return concourse.get(keys, ccl, timestamp, order, page);
}
@Override
public <T> Map<Long, Map<String, T>> get(Collection<String> keys,
String ccl, Timestamp timestamp, Page page) {
return concourse.get(keys, ccl, timestamp, page);
}
@Override
public <T> Map<Long, Map<String, T>> get(Criteria criteria) {
return concourse.get(criteria);
}
@Override
public <T> Map<Long, Map<String, T>> get(Criteria criteria, Order order) {
return concourse.get(criteria, order);
}
@Override
public <T> Map<Long, Map<String, T>> get(Criteria criteria, Order order,
Page page) {
return concourse.get(criteria, order, page);
}
@Override
public <T> Map<Long, Map<String, T>> get(Criteria criteria, Page page) {
return concourse.get(criteria, page);
}
@Override
public <T> Map<Long, Map<String, T>> get(Criteria criteria,
Timestamp timestamp) {
return concourse.get(criteria, timestamp);
}
@Override
public <T> Map<Long, Map<String, T>> get(Criteria criteria,
Timestamp timestamp, Order order) {
return concourse.get(criteria, timestamp, order);
}
@Override
public <T> Map<Long, Map<String, T>> get(Criteria criteria,
Timestamp timestamp, Order order, Page page) {
return concourse.get(criteria, timestamp, order, page);
}
@Override
public <T> Map<Long, Map<String, T>> get(Criteria criteria,
Timestamp timestamp, Page page) {
return concourse.get(criteria, timestamp, page);
}
@Override
public <T> Map<Long, Map<String, T>> get(String ccl) {
return concourse.get(ccl);
}
@Override
public <T> Map<Long, T> get(String key, Collection<Long> records) {
return concourse.get(key, records);
}
@Override
public <T> Map<Long, T> get(String key, Collection<Long> records,
Order order) {
return concourse.get(key, records, order);
}
@Override
public <T> Map<Long, T> get(String key, Collection<Long> records,
Order order, Page page) {
return concourse.get(key, records, order, page);
}
@Override
public <T> Map<Long, T> get(String key, Collection<Long> records,
Page page) {
return concourse.get(key, records, page);
}
@Override
public <T> Map<Long, T> get(String key, Collection<Long> records,
Timestamp timestamp) {
return concourse.get(key, records, timestamp);
}
@Override
public <T> Map<Long, T> get(String key, Collection<Long> records,
Timestamp timestamp, Order order) {
return concourse.get(key, records, timestamp, order);
}
@Override
public <T> Map<Long, T> get(String key, Collection<Long> records,
Timestamp timestamp, Order order, Page page) {
return concourse.get(key, records, timestamp, order, page);
}
@Override
public <T> Map<Long, T> get(String key, Collection<Long> records,
Timestamp timestamp, Page page) {
return concourse.get(key, records, timestamp, page);
}
@Override
public <T> Map<Long, T> get(String key, Criteria criteria) {
return concourse.get(key, criteria);
}
@Override
public <T> Map<Long, T> get(String key, Criteria criteria, Order order) {
return concourse.get(key, criteria, order);
}
@Override
public <T> Map<Long, T> get(String key, Criteria criteria, Order order,
Page page) {
return concourse.get(key, criteria, order, page);
}
@Override
public <T> Map<Long, T> get(String key, Criteria criteria, Page page) {
return concourse.get(key, criteria, page);
}
@Override
public <T> Map<Long, T> get(String key, Criteria criteria,
Timestamp timestamp) {
return concourse.get(key, criteria, timestamp);
}
@Override
public <T> Map<Long, T> get(String key, Criteria criteria,
Timestamp timestamp, Order order) {
return concourse.get(key, criteria, timestamp, order);
}
@Override
public <T> Map<Long, T> get(String key, Criteria criteria,
Timestamp timestamp, Order order, Page page) {
return concourse.get(key, criteria, timestamp, order, page);
}
@Override
public <T> Map<Long, T> get(String key, Criteria criteria,
Timestamp timestamp, Page page) {
return concourse.get(key, criteria, timestamp, page);
}
@Override
public <T> T get(String key, long record) {
return concourse.get(key, record);
}
@Override
public <T> T get(String key, long record, Timestamp timestamp) {
return concourse.get(key, record, timestamp);
}
@Override
public <T> Map<Long, Map<String, T>> get(String ccl, Order order) {
return concourse.get(ccl, order);
}
@Override
public <T> Map<Long, Map<String, T>> get(String ccl, Order order,
Page page) {
return concourse.get(ccl, order, page);
}
@Override
public <T> Map<Long, Map<String, T>> get(String ccl, Page page) {
return concourse.get(ccl, page);
}
@Override
public <T> Map<Long, T> get(String key, String ccl) {
return concourse.get(key, ccl);
}
@Override
public <T> Map<Long, T> get(String key, String ccl, Order order) {
return concourse.get(key, ccl, order);
}
@Override
public <T> Map<Long, T> get(String key, String ccl, Order order,
Page page) {
return concourse.get(key, ccl, order, page);
}
@Override
public <T> Map<Long, T> get(String key, String ccl, Page page) {
return concourse.get(key, ccl, page);
}
@Override
public <T> Map<Long, T> get(String key, String ccl, Timestamp timestamp) {
return concourse.get(key, ccl, timestamp);
}
@Override
public <T> Map<Long, T> get(String key, String ccl, Timestamp timestamp,
Order order) {
return concourse.get(key, ccl, timestamp, order);
}
@Override
public <T> Map<Long, T> get(String key, String ccl, Timestamp timestamp,
Order order, Page page) {
return concourse.get(key, ccl, timestamp, order, page);
}
@Override
public <T> Map<Long, T> get(String key, String ccl, Timestamp timestamp,
Page page) {
return concourse.get(key, ccl, timestamp, page);
}
@Override
public <T> Map<Long, Map<String, T>> get(String ccl, Timestamp timestamp) {
return concourse.get(ccl, timestamp);
}
@Override
public <T> Map<Long, Map<String, T>> get(String ccl, Timestamp timestamp,
Order order) {
return concourse.get(ccl, timestamp, order);
}
@Override
public <T> Map<Long, Map<String, T>> get(String ccl, Timestamp timestamp,
Order order, Page page) {
return concourse.get(ccl, timestamp, order, page);
}
@Override
public <T> Map<Long, Map<String, T>> get(String ccl, Timestamp timestamp,
Page page) {
return concourse.get(ccl, timestamp, page);
}
@Override
public String getServerEnvironment() {
return concourse.getServerEnvironment();
}
@Override
public String getServerVersion() {
return concourse.getServerVersion();
}
@Override
public Set<Long> insert(String json) {
return concourse.insert(json);
}
@Override
public Map<Long, Boolean> insert(String json, Collection<Long> records) {
return concourse.insert(json, records);
}
@Override
public boolean insert(String json, long record) {
return concourse.insert(json, record);
}
@Override
public Set<Long> inventory() {
return concourse.inventory();
}
@Override
public <T> T invokePlugin(String id, String method, Object... args) {
return concourse.invokePlugin(id, method, args);
}
@Override
public String jsonify(Collection<Long> records) {
return concourse.jsonify(records);
}
@Override
public String jsonify(Collection<Long> records, boolean identifier) {
return concourse.jsonify(records, identifier);
}
@Override
public String jsonify(Collection<Long> records, Timestamp timestamp) {
return concourse.jsonify(records, timestamp);
}
@Override
public String jsonify(Collection<Long> records, Timestamp timestamp,
boolean identifier) {
return concourse.jsonify(records, timestamp, identifier);
}
@Override
public String jsonify(long record) {
return concourse.jsonify(record);
}
@Override
public String jsonify(long record, boolean identifier) {
return concourse.jsonify(record, identifier);
}
@Override
public String jsonify(long record, Timestamp timestamp) {
return concourse.jsonify(record, timestamp);
}
@Override
public String jsonify(long record, Timestamp timestamp,
boolean identifier) {
return concourse.jsonify(record, timestamp, identifier);
}
@Override
public Map<Long, Boolean> link(String key, Collection<Long> destinations,
long source) {
return concourse.link(key, destinations, source);
}
@Override
public boolean link(String key, long destination, long source) {
return concourse.link(key, destination, source);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> navigate(Collection<String> keys,
Collection<Long> records) {
return concourse.navigate(keys, records);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> navigate(Collection<String> keys,
Collection<Long> records, Timestamp timestamp) {
return concourse.navigate(keys, records, timestamp);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> navigate(Collection<String> keys,
Criteria criteria) {
return concourse.navigate(keys, criteria);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> navigate(Collection<String> keys,
Criteria criteria, Timestamp timestamp) {
return concourse.navigate(keys, criteria, timestamp);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> navigate(Collection<String> keys,
long record) {
return concourse.navigate(keys, record);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> navigate(Collection<String> keys,
long record, Timestamp timestamp) {
return concourse.navigate(keys, record, timestamp);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> navigate(Collection<String> keys,
String ccl) {
return concourse.navigate(keys, ccl);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> navigate(Collection<String> keys,
String ccl, Timestamp timestamp) {
return concourse.navigate(keys, ccl, timestamp);
}
@Override
public <T> Map<Long, Set<T>> navigate(String key,
Collection<Long> records) {
return concourse.navigate(key, records);
}
@Override
public <T> Map<Long, Set<T>> navigate(String key, Collection<Long> records,
Timestamp timestamp) {
return concourse.navigate(key, records, timestamp);
}
@Override
public <T> Map<Long, Set<T>> navigate(String key, Criteria criteria) {
return concourse.navigate(key, criteria);
}
@Override
public <T> Map<Long, Set<T>> navigate(String key, Criteria criteria,
Timestamp timestamp) {
return concourse.navigate(key, criteria, timestamp);
}
@Override
public <T> Map<Long, Set<T>> navigate(String key, long record) {
return concourse.navigate(key, record);
}
@Override
public <T> Map<Long, Set<T>> navigate(String key, long record,
Timestamp timestamp) {
return concourse.navigate(key, record, timestamp);
}
@Override
public <T> Map<Long, Set<T>> navigate(String key, String ccl) {
return concourse.navigate(key, ccl);
}
@Override
public <T> Map<Long, Set<T>> navigate(String key, String ccl,
Timestamp timestamp) {
return concourse.navigate(key, ccl, timestamp);
}
@Override
public Map<Long, Boolean> ping(Collection<Long> records) {
return concourse.ping(records);
}
@Override
public boolean ping(long record) {
return concourse.ping(record);
}
@Override
public <T> void reconcile(String key, long record, Collection<T> values) {
concourse.reconcile(key, record, values);
}
@Override
public <T> Map<Long, Boolean> remove(String key, T value,
Collection<Long> records) {
return concourse.remove(key, value, records);
}
@Override
public <T> boolean remove(String key, T value, long record) {
return concourse.remove(key, value, record);
}
@Override
public void revert(Collection<String> keys, Collection<Long> records,
Timestamp timestamp) {
concourse.revert(keys, records, timestamp);
}
@Override
public void revert(Collection<String> keys, long record,
Timestamp timestamp) {
concourse.revert(keys, record, timestamp);
}
@Override
public void revert(String key, Collection<Long> records,
Timestamp timestamp) {
concourse.revert(key, records, timestamp);
}
@Override
public void revert(String key, long record, Timestamp timestamp) {
concourse.revert(key, record, timestamp);
}
@Override
public Set<Long> search(String key, String query) {
return concourse.search(key, query);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> select(Collection<Long> records) {
return concourse.select(records);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> select(Collection<Long> records,
Order order) {
return concourse.select(records, order);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> select(Collection<Long> records,
Order order, Page page) {
return concourse.select(records, order, page);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> select(Collection<Long> records,
Page page) {
return concourse.select(records, page);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> select(Collection<Long> records,
Timestamp timestamp) {
return concourse.select(records, timestamp);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> select(Collection<Long> records,
Timestamp timestamp, Order order) {
return concourse.select(records, timestamp, order);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> select(Collection<Long> records,
Timestamp timestamp, Order order, Page page) {
return concourse.select(records, timestamp, order, page);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> select(Collection<Long> records,
Timestamp timestamp, Page page) {
return concourse.select(records, timestamp, page);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> select(Collection<String> keys,
Collection<Long> records) {
return concourse.select(keys, records);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> select(Collection<String> keys,
Collection<Long> records, Order order) {
return concourse.select(keys, records, order);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> select(Collection<String> keys,
Collection<Long> records, Order order, Page page) {
return concourse.select(keys, records, order, page);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> select(Collection<String> keys,
Collection<Long> records, Page page) {
return concourse.select(keys, records, page);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> select(Collection<String> keys,
Collection<Long> records, Timestamp timestamp) {
return concourse.select(keys, records, timestamp);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> select(Collection<String> keys,
Collection<Long> records, Timestamp timestamp, Order order) {
return concourse.select(keys, records, timestamp, order);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> select(Collection<String> keys,
Collection<Long> records, Timestamp timestamp, Order order,
Page page) {
return concourse.select(keys, records, timestamp, order, page);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> select(Collection<String> keys,
Collection<Long> records, Timestamp timestamp, Page page) {
return concourse.select(keys, records, timestamp, page);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> select(Collection<String> keys,
Criteria criteria) {
return concourse.select(keys, criteria);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> select(Collection<String> keys,
Criteria criteria, Order order) {
return concourse.select(keys, criteria, order);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> select(Collection<String> keys,
Criteria criteria, Order order, Page page) {
return concourse.select(keys, criteria, order, page);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> select(Collection<String> keys,
Criteria criteria, Page page) {
return concourse.select(keys, criteria, page);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> select(Collection<String> keys,
Criteria criteria, Timestamp timestamp) {
return concourse.select(keys, criteria, timestamp);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> select(Collection<String> keys,
Criteria criteria, Timestamp timestamp, Order order) {
return concourse.select(keys, criteria, timestamp, order);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> select(Collection<String> keys,
Criteria criteria, Timestamp timestamp, Order order, Page page) {
return concourse.select(keys, criteria, timestamp, order, page);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> select(Collection<String> keys,
Criteria criteria, Timestamp timestamp, Page page) {
return concourse.select(keys, criteria, timestamp, page);
}
@Override
public <T> Map<String, Set<T>> select(Collection<String> keys,
long record) {
return concourse.select(keys, record);
}
@Override
public <T> Map<String, Set<T>> select(Collection<String> keys, long record,
Timestamp timestamp) {
return concourse.select(keys, record, timestamp);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> select(Collection<String> keys,
String ccl) {
return concourse.select(keys, ccl);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> select(Collection<String> keys,
String ccl, Order order) {
return concourse.select(keys, ccl, order);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> select(Collection<String> keys,
String ccl, Order order, Page page) {
return concourse.select(keys, ccl, order, page);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> select(Collection<String> keys,
String ccl, Page page) {
return concourse.select(keys, ccl, page);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> select(Collection<String> keys,
String ccl, Timestamp timestamp) {
return concourse.select(keys, ccl, timestamp);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> select(Collection<String> keys,
String ccl, Timestamp timestamp, Order order) {
return concourse.select(keys, ccl, timestamp, order);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> select(Collection<String> keys,
String ccl, Timestamp timestamp, Order order, Page page) {
return concourse.select(keys, ccl, timestamp, order, page);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> select(Collection<String> keys,
String ccl, Timestamp timestamp, Page page) {
return concourse.select(keys, ccl, timestamp, page);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> select(Criteria criteria) {
return concourse.select(criteria);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> select(Criteria criteria,
Order order) {
return concourse.select(criteria, order);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> select(Criteria criteria,
Order order, Page page) {
return concourse.select(criteria, order, page);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> select(Criteria criteria,
Page page) {
return concourse.select(criteria, page);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> select(Criteria criteria,
Timestamp timestamp) {
return concourse.select(criteria, timestamp);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> select(Criteria criteria,
Timestamp timestamp, Order order) {
return concourse.select(criteria, timestamp, order);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> select(Criteria criteria,
Timestamp timestamp, Order order, Page page) {
return concourse.select(criteria, timestamp, order, page);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> select(Criteria criteria,
Timestamp timestamp, Page page) {
return concourse.select(criteria, timestamp, page);
}
@Override
public <T> Map<String, Set<T>> select(long record) {
return concourse.select(record);
}
@Override
public <T> Map<String, Set<T>> select(long record, Timestamp timestamp) {
return concourse.select(record, timestamp);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> select(String ccl) {
return concourse.select(ccl);
}
@Override
public <T> Map<Long, Set<T>> select(String key, Collection<Long> records) {
return concourse.select(key, records);
}
@Override
public <T> Map<Long, Set<T>> select(String key, Collection<Long> records,
Order order) {
return concourse.select(key, records, order);
}
@Override
public <T> Map<Long, Set<T>> select(String key, Collection<Long> records,
Order order, Page page) {
return concourse.select(key, records, order, page);
}
@Override
public <T> Map<Long, Set<T>> select(String key, Collection<Long> records,
Page page) {
return concourse.select(key, records, page);
}
@Override
public <T> Map<Long, Set<T>> select(String key, Collection<Long> records,
Timestamp timestamp) {
return concourse.select(key, records, timestamp);
}
@Override
public <T> Map<Long, Set<T>> select(String key, Collection<Long> records,
Timestamp timestamp, Order order) {
return concourse.select(key, records, timestamp, order);
}
@Override
public <T> Map<Long, Set<T>> select(String key, Collection<Long> records,
Timestamp timestamp, Order order, Page page) {
return concourse.select(key, records, timestamp, order, page);
}
@Override
public <T> Map<Long, Set<T>> select(String key, Collection<Long> records,
Timestamp timestamp, Page page) {
return concourse.select(key, records, timestamp, page);
}
@Override
public <T> Map<Long, Set<T>> select(String key, Criteria criteria) {
return concourse.select(key, criteria);
}
@Override
public <T> Map<Long, Set<T>> select(String key, Criteria criteria,
Order order) {
return concourse.select(key, criteria, order);
}
@Override
public <T> Map<Long, Set<T>> select(String key, Criteria criteria,
Order order, Page page) {
return concourse.select(key, criteria, order, page);
}
@Override
public <T> Map<Long, Set<T>> select(String key, Criteria criteria,
Page page) {
return concourse.select(key, criteria, page);
}
@Override
public <T> Map<Long, Set<T>> select(String key, Criteria criteria,
Timestamp timestamp) {
return concourse.select(key, criteria, timestamp);
}
@Override
public <T> Map<Long, Set<T>> select(String key, Criteria criteria,
Timestamp timestamp, Order order) {
return concourse.select(key, criteria, timestamp, order);
}
@Override
public <T> Map<Long, Set<T>> select(String key, Criteria criteria,
Timestamp timestamp, Order order, Page page) {
return concourse.select(key, criteria, timestamp, order, page);
}
@Override
public <T> Map<Long, Set<T>> select(String key, Criteria criteria,
Timestamp timestamp, Page page) {
return concourse.select(key, criteria, timestamp, page);
}
@Override
public <T> Set<T> select(String key, long record) {
return concourse.select(key, record);
}
@Override
public <T> Set<T> select(String key, long record, Timestamp timestamp) {
return concourse.select(key, record, timestamp);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> select(String ccl, Order order) {
return concourse.select(ccl, order);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> select(String ccl, Order order,
Page page) {
return concourse.select(ccl, order, page);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> select(String ccl, Page page) {
return concourse.select(ccl, page);
}
@Override
public <T> Map<Long, Set<T>> select(String key, String ccl) {
return concourse.select(key, ccl);
}
@Override
public <T> Map<Long, Set<T>> select(String key, String ccl, Order order) {
return concourse.select(key, ccl, order);
}
@Override
public <T> Map<Long, Set<T>> select(String key, String ccl, Order order,
Page page) {
return concourse.select(key, ccl, order, page);
}
@Override
public <T> Map<Long, Set<T>> select(String key, String ccl, Page page) {
return concourse.select(key, ccl, page);
}
@Override
public <T> Map<Long, Set<T>> select(String key, String ccl,
Timestamp timestamp) {
return concourse.select(key, ccl, timestamp);
}
@Override
public <T> Map<Long, Set<T>> select(String key, String ccl,
Timestamp timestamp, Order order) {
return concourse.select(key, ccl, timestamp, order);
}
@Override
public <T> Map<Long, Set<T>> select(String key, String ccl,
Timestamp timestamp, Order order, Page page) {
return concourse.select(key, ccl, timestamp, order, page);
}
@Override
public <T> Map<Long, Set<T>> select(String key, String ccl,
Timestamp timestamp, Page page) {
return concourse.select(key, ccl, timestamp, page);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> select(String ccl,
Timestamp timestamp) {
return concourse.select(ccl, timestamp);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> select(String ccl,
Timestamp timestamp, Order order) {
return concourse.select(ccl, timestamp, order);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> select(String ccl,
Timestamp timestamp, Order order, Page page) {
return concourse.select(ccl, timestamp, order, page);
}
@Override
public <T> Map<Long, Map<String, Set<T>>> select(String ccl,
Timestamp timestamp, Page page) {
return concourse.select(ccl, timestamp, page);
}
@Override
public void set(String key, Object value, Collection<Long> records) {
concourse.set(key, value, records);
}
@Override
public <T> void set(String key, T value, long record) {
concourse.set(key, value, record);
}
@Override
public void stage() throws TransactionException {
concourse.stage();
}
@Override
public Timestamp time() {
return concourse.time();
}
@Override
public Timestamp time(String phrase) {
return concourse.time(phrase);
}
@Override
public Map<Long, Map<String, Set<Long>>> trace(Collection<Long> records) {
return concourse.trace(records);
}
@Override
public Map<Long, Map<String, Set<Long>>> trace(Collection<Long> records,
Timestamp timestamp) {
return concourse.trace(records, timestamp);
}
@Override
public Map<String, Set<Long>> trace(long record) {
return concourse.trace(record);
}
@Override
public Map<String, Set<Long>> trace(long record, Timestamp timestamp) {
return concourse.trace(record, timestamp);
}
@Override
public boolean unlink(String key, long destination, long source) {
return concourse.unlink(key, destination, source);
}
@Override
public boolean verify(String key, Object value, long record) {
return concourse.verify(key, value, record);
}
@Override
public boolean verify(String key, Object value, long record,
Timestamp timestamp) {
return concourse.verify(key, value, record, timestamp);
}
@Override
public boolean verifyAndSwap(String key, Object expected, long record,
Object replacement) {
return concourse.verifyAndSwap(key, expected, record, replacement);
}
@Override
public void verifyOrSet(String key, Object value, long record) {
concourse.verifyOrSet(key, value, record);
}
/**
* Construct an instance of this {@link ForwardingConcourse} using the
* provided {@code concourse} connection as the proxied handle.
*
* @param concourse the connection to use as the proxied handle
* @return an instance of this class
*/
protected abstract ForwardingConcourse $this(Concourse concourse);
@Override
protected final Concourse copyConnection() {
return $this(concourse.copyConnection());
}
} |
def clear_field_actions(self, fieldname):
    """Clear any field actions previously configured for `fieldname`."""
    if self._index is None:
        raise errors.IndexerError("IndexerConnection has been closed")
    if fieldname in self._field_actions:
        del self._field_actions[fieldname]
        self._config_modified = True
import Vue from 'vue'
import Component from 'vue-class-component'
import { Model } from '../../src/decorators/Model'
describe(Model, () => {
@Component
class TestComponent extends Vue {
@Model('change', Boolean) checked!: boolean
}
const { $options } = new TestComponent()
test('define model option correctly', () => {
expect($options.model).toEqual({ prop: 'checked', event: 'change' })
})
test('define props option correctly', () => {
const props = ($options.props as any) as Record<string, any>
expect(props!['checked']).toEqual({ type: Boolean })
})
})
|
use crate::common::*;
use super::{
expr::As, Aggregation, AudienceBoard, Between, BinaryOp, BinaryOperator, Column, Context,
ContextKey, Distribution, Expr, ExprMeta, ExprT, ExprTree, Function, FunctionName, GenericRel,
GenericRelTree, Hash, HashAlgorithm, Literal, LiteralValue, Noisy, Projection, Rel, RelT,
Selection, Table, TableMeta, ToContext, TryToContext, ValidateError,
};
use crate::node::Access;
use crate::opt::{ContextError, RebaseRel};
use super::privacy::*;
/// Small helper to figure out if a given context key matches any of the given field patterns
fn matches_in<'a, I: IntoIterator<Item = &'a String>>(
iter: I,
key: &'a ContextKey,
) -> Result<bool, ValidateError> {
for field in iter.into_iter() {
if key.matches(&field.parse()?) {
return Ok(true);
}
}
Ok(false)
}
#[derive(Debug, Clone)]
pub struct Policy(pub policy::Policy);
pub struct Costly<T> {
root: T,
cost: f64,
}
impl<T> From<T> for Costly<T> {
fn from(root: T) -> Self {
Self { root, cost: 0. }
}
}
impl ExprTransform for WhitelistPolicy {
fn transform_expr(&self, expr: &ExprT) -> Result<Costly<ExprT>, Error> {
match expr.as_ref() {
Expr::Column(Column(context_key)) => {
if matches_in(self.fields.iter(), &context_key)? {
Ok(expr.clone().into())
} else {
Err(Error::NoMatch)
}
}
_ => Err(Error::NoMatch),
}
}
}
impl ExprTransform for HashPolicy {
fn transform_expr(&self, expr: &ExprT) -> Result<Costly<ExprT>, Error> {
match expr.as_ref() {
Expr::Column(Column(context_key)) => {
if matches_in(self.fields.iter(), &context_key)? {
Ok(ExprT::from(Expr::As(As {
expr: ExprT::from(Expr::Hash(Hash {
algo: HashAlgorithm::default(),
expr: expr.clone(),
salt: self.salt.clone(),
})),
alias: context_key.name().to_string(),
}))
.into())
} else {
Err(Error::NoMatch)
}
}
_ => Err(Error::NoMatch),
}
}
}
impl ExprTransform for ObfuscatePolicy {
fn transform_expr(&self, expr: &ExprT) -> Result<Costly<ExprT>, Error> {
match expr.as_ref() {
Expr::Column(Column(context_key)) => {
if matches_in(self.fields.iter(), &context_key)? {
let expr = ExprT::from(Expr::Literal(Literal(LiteralValue::Null)));
let alias = context_key.name().to_string();
Ok(ExprT::from(Expr::As(As { expr, alias })).into())
} else {
Err(Error::NoMatch)
}
}
_ => Err(Error::NoMatch),
}
}
}
impl ExprTransform for Policy {
fn transform_expr(&self, expr: &ExprT) -> Result<Costly<ExprT>, Error> {
match &self.0 {
policy::Policy::Whitelist(whitelist) => whitelist.transform_expr(expr),
policy::Policy::Hash(hash) => hash.transform_expr(expr),
policy::Policy::Obfuscate(obfuscate) => obfuscate.transform_expr(expr),
_ => Err(Error::NoMatch),
}
}
}
#[async_trait]
impl RelTransform for DifferentialPrivacyPolicy {
async fn transform_rel<A: Access>(
&self,
rel: &RelT,
access: &A,
) -> Result<Costly<RelT>, Error> {
match rel.as_ref() {
GenericRel::Aggregation(Aggregation {
attributes,
group_by,
from,
}) => {
// FIXME: This could be optimized
let getter = FlexTableMetaGetter {
primary: self.entity.clone(),
access,
};
let flex = getter.rebase(rel).await;
if let Err(err) = flex.board.as_ref() {
trace!("rebase lead to incorrect tree, dropping match: {}", err);
return Err(Error::NoMatch);
}
let (flex_attributes, flex_group_by, flex_from) = match flex.as_ref() {
GenericRel::Aggregation(Aggregation {
attributes,
group_by,
from,
}) => (attributes, group_by, from),
_ => unreachable!(),
};
let mut factor = 1.;
let mut grouping_keys = HashSet::new();
for (expr, flex_expr) in group_by.iter().zip(flex_group_by.iter()) {
if let Expr::Column(Column(column_key)) = expr.as_ref() {
grouping_keys.insert(column_key);
let col_maximum_frequency = flex_expr
.board
.as_ref()
.map_err(|e| e.clone())?
.domain_sensitivity
.maximum_frequency
.0
.ok_or(Error::NoMatch)?;
factor *= col_maximum_frequency as f64;
} else {
return Err(Error::NoMatch);
}
if flex_expr.board.as_ref().map_err(|e| e.clone())?.taint.0 {
return Err(Error::NoMatch);
}
}
let bucket_alias = "__bucket_count";
let bucket_key = ContextKey::with_name(bucket_alias);
let maximum_frequency = flex_from
.board
.as_ref()
.map_err(|e| e.clone())?
.primary
.maximum_frequency
.0
.ok_or(Error::NoMatch)?;
let threshold = (self.bucket_size * maximum_frequency) as i64;
let one = ExprT::from(Expr::Literal(Literal(LiteralValue::Long(1))));
// this cost is per row
let mut cost = 0.;
let mut new_attributes = Vec::new();
let mut projection_attributes = Vec::new();
for (i, (expr, flex_expr)) in
attributes.iter().zip(flex_attributes.iter()).enumerate()
{
match expr.as_ref() {
Expr::Column(Column(column_key)) => {
if !grouping_keys.contains(&column_key) {
return Err(Error::NoMatch);
}
new_attributes.push(ExprT::from(Expr::As(As {
expr: expr.clone(),
alias: column_key.name().to_string(),
})));
projection_attributes.push(expr.clone());
}
Expr::Function(Function {
name,
args,
distinct,
}) => {
// assuming function is aggregation
let board = flex_expr.board.as_ref().map_err(|e| e.clone())?;
let sensitivity = board
.domain_sensitivity
.sensitivity
.0
.ok_or(Error::NoMatch)?;
let distribution = Distribution::Laplace {
mean: 0.,
variance: sensitivity / self.epsilon,
};
cost += self.epsilon;
let alias = format!("f{}_", i);
let new_expr = ExprT::from(Expr::As(As {
expr: ExprT::from(Expr::Noisy(Noisy {
expr: expr.clone(),
distribution,
})),
alias: alias.clone(),
}));
new_attributes.push(new_expr);
let alias_as_col =
ExprT::from(Expr::Column(Column(ContextKey::with_name(&alias))));
projection_attributes.push(alias_as_col);
}
_ => return Err(Error::NoMatch),
}
}
new_attributes.push(ExprT::from(Expr::As(As {
expr: ExprT::from(Expr::Noisy(Noisy {
expr: ExprT::from(Expr::Function(Function {
name: FunctionName::Count,
args: vec![one.clone()],
distinct: false,
})),
distribution: Distribution::Laplace {
mean: 0.,
variance: 1. / self.epsilon,
},
})),
alias: bucket_alias.to_string(),
})));
let noised_root = RelT::from(GenericRel::Aggregation(Aggregation {
attributes: new_attributes,
group_by: group_by.clone(),
from: from.clone(),
}));
let where_bucket_count = ExprT::from(Expr::BinaryOp(BinaryOp {
op: BinaryOperator::Gt,
left: ExprT::from(Expr::Column(Column(bucket_key))),
right: { ExprT::from(Expr::Literal(Literal(LiteralValue::Long(threshold)))) },
}));
let new_root = RelT::from(GenericRel::Projection(Projection {
from: RelT::from(GenericRel::Selection(Selection {
from: noised_root,
where_: where_bucket_count,
})),
attributes: projection_attributes,
}));
let ctx = access.context().await.unwrap();
let new_root = RebaseRel::<'_, TableMeta>::rebase(&ctx, &new_root).await; // repair it
Ok(Costly {
root: new_root,
cost,
})
}
_ => Err(Error::NoMatch),
}
}
}
#[async_trait]
impl RelTransform for AggregationPolicy {
async fn transform_rel<A: Access>(
&self,
rel: &RelT,
access: &A,
) -> Result<Costly<RelT>, Error> {
match rel.as_ref() {
GenericRel::Aggregation(Aggregation {
attributes,
group_by,
from,
}) => {
let entity_key = ContextKey::with_name(&self.entity);
let entity_alias_str = format!("policy_{}", entity_key.name());
let entity_alias = ContextKey::with_name(&entity_alias_str);
let ctx = access.context().await.unwrap();
let rewritten: RelT = rel
.clone()
.try_fold(&mut |child| match child {
GenericRel::Table(Table(context_key)) => {
let table_meta = ctx.get(&context_key).unwrap();
let columns = table_meta.to_context();
if columns.get_column(&entity_key).is_ok() {
Ok(RelT {
root: GenericRel::Table(Table(context_key)),
board: Ok(table_meta.clone()),
})
} else {
Err(Error::NoMatch)
}
}
GenericRel::Projection(Projection { attributes, from }) => {
let mut attributes = attributes.clone();
attributes.push(ExprT::from(Expr::Column(Column(entity_key.clone()))));
Ok(RelT::from(GenericRel::Projection(Projection {
attributes,
from,
})))
}
GenericRel::Aggregation(Aggregation {
attributes,
from,
group_by,
}) => {
let mut attributes = attributes
.iter()
.cloned()
.enumerate()
.map(|(i, expr)| {
ExprT::from(Expr::As(As {
expr,
alias: format!("f{}_", i),
}))
})
.collect::<Vec<_>>();
attributes.push(ExprT::from(Expr::As(As {
expr: ExprT::from(Expr::Function(Function {
name: FunctionName::Count,
args: vec![ExprT::from(Expr::Column(Column(
entity_key.clone(),
)))],
distinct: true,
})),
alias: entity_alias.name().to_string(),
})));
Ok(RelT::from(GenericRel::Aggregation(Aggregation {
attributes,
from,
group_by,
})))
}
_ => Ok(RelT::from(child)),
})
.unwrap();
let rewritten = RebaseRel::<'_, TableMeta>::rebase(&ctx, &rewritten).await; // repair it
let board = rewritten.board.as_ref().map_err(|_| Error::NoMatch)?;
if board.to_context().get(&entity_alias).is_ok() {
let where_ = ExprT::from(Expr::BinaryOp(BinaryOp {
left: ExprT::from(Expr::Column(Column(entity_alias))),
op: BinaryOperator::Gt,
right: ExprT::from(Expr::Literal(Literal(LiteralValue::Long(
self.minimum_bucket_size as i64,
)))),
}));
let num_cols = board.columns.len();
let new_root = RelT::from(GenericRel::Projection(Projection {
from: RelT::from(GenericRel::Selection(Selection {
from: rewritten,
where_,
})),
attributes: {
(0..(num_cols - 1))
.into_iter()
.map(|i| {
let context_key = ContextKey::with_name(&format!("f{}_", i));
ExprT::from(Expr::Column(Column(context_key)))
})
.collect::<Vec<_>>()
},
}));
let new_root = RebaseRel::<'_, TableMeta>::rebase(&ctx, &new_root).await;
Ok(new_root.into())
} else {
Err(Error::NoMatch)
}
}
_ => Err(Error::NoMatch),
}
}
}
#[async_trait]
impl RelTransform for Policy {
async fn transform_rel<A: Access>(
&self,
rel: &RelT,
access: &A,
) -> Result<Costly<RelT>, Error> {
match &self.0 {
policy::Policy::DifferentialPrivacy(differential_privacy) => {
differential_privacy.transform_rel(rel, access).await
}
policy::Policy::Aggregation(aggregation) => {
aggregation.transform_rel(rel, access).await
}
_ => Err(Error::NoMatch),
}
}
}
#[derive(derive_more::From, Debug)]
pub enum Error {
NoMatch,
Validate(ValidateError),
}
pub trait ExprTransform {
fn transform_expr(&self, expr: &ExprT) -> Result<Costly<ExprT>, Error>;
}
#[async_trait]
pub trait RelTransform {
async fn transform_rel<A: Access>(&self, rel: &RelT, access: &A)
-> Result<Costly<RelT>, Error>;
}
#[derive(Clone, Debug)]
pub struct PolicyBinding {
pub policies: Vec<Policy>,
pub priority: u64,
pub budget: Option<PolicyBudget>,
}
impl PolicyBinding {
fn is_in_budget(&self, proposed: f64) -> bool {
self.budget
.as_ref()
.map(|PolicyBudget { maximum, used, .. }| used + proposed <= *maximum)
.unwrap_or(true)
}
}
pub struct RelTransformer<'a, A> {
bindings: &'a Context<PolicyBinding>,
audience: &'a BlockType,
access: &'a A,
}
impl<'a, A> RelTransformer<'a, A>
where
A: Access,
{
pub fn new(
bindings: &'a Context<PolicyBinding>,
audience: &'a BlockType,
access: &'a A,
) -> Self {
debug!(
"initializing relation transformer for {} with bindings={:?}",
audience, bindings
);
Self {
bindings,
audience,
access,
}
}
/// Filter the policy bindings that apply to the `context_key`
fn filter_bindings<'b>(&'b self, context_key: &'b ContextKey) -> Context<&'a PolicyBinding> {
debug!("sifting policies for {}", context_key);
self.bindings
.iter()
.filter_map(move |(key, binding)| {
if key.prefix_matches(context_key) {
Some((key.clone(), binding))
} else {
None
}
})
.collect()
}
pub fn transform_rel<'b>(
&'b self,
rel_t: &'b RelT,
) -> Pin<Box<dyn Future<Output = Result<Transformed<RelT>, Error>> + Send + 'b>> {
async move {
let unraveled = rel_t.root.map(&mut |child| child.as_ref());
let proposed = match unraveled {
Rel::Projection(Projection {
mut attributes,
from:
RelT {
root: Rel::Table(Table(context_key)),
board,
},
}) => {
debug!("potential expr leaf policy condition met");
let from = RelT {
root: Rel::Table(Table(context_key.clone())),
board: board.clone(),
};
let bindings = self.filter_bindings(context_key);
debug!("bindings filtered to {:?}", bindings);
let mut cost = HashMap::new();
let mut priority = 0;
let expr_transformer = ExprTransformer::new(&bindings, &self.audience);
for expr_t in attributes.iter_mut() {
match expr_transformer.transform_expr(expr_t) {
Ok(transformed) => {
debug!("successfully transformed expression");
transformed.add_to(&mut cost);
*expr_t = transformed.root;
priority = max(priority, transformed.priority);
}
Err(Error::NoMatch) => {}
Err(err) => return Err(err),
}
}
let root = RelT::from(Rel::Projection(Projection { attributes, from }));
debug!("rebuilt leaf relation node {:?}", root);
let audience = root
.board
.as_ref()
.map(|board| &board.audience)
.map_err(|e| Error::Validate(e.clone()))?;
debug!(
"after transformation of expression, audience: {:?}",
audience
);
if audience.contains(&self.audience) {
vec![Transformed {
root,
cost,
priority,
}]
} else {
vec![]
}
}
_ => {
let provenance = rel_t
.board
.as_ref()
.map_err(|e| Error::Validate(e.clone()))?
.provenance
.as_ref();
if let Some(provenance) = provenance {
let bindings = self.filter_bindings(provenance);
let mut candidates = Vec::new();
for (key, binding) in bindings.iter() {
for policy in binding.policies.iter() {
match policy.transform_rel(rel_t, self.access).await {
Ok(Costly { mut root, cost }) => {
root.board
.as_mut()
.map(|board| {
board.audience.insert(self.audience.clone())
})
.map_err(|e| Error::Validate(e.clone()))?;
let transformed =
Transformed::new(root, key, cost, binding.priority);
candidates.push(transformed);
}
Err(Error::NoMatch) => {}
Err(err) => return Err(err),
}
}
}
candidates
} else {
vec![]
}
}
};
if let Some(best) = Transformed::best_candidate(proposed) {
debug!("best candidate for relation: {:?}", best);
Ok(best)
} else {
debug!("no candidate for relation at this level");
if rel_t.is_leaf() {
debug!("leaf relation attained, no match");
return Err(Error::NoMatch);
}
let state = Mutex::new((HashMap::new(), 0u64));
let state_ref = &state;
let root = RelT::from(
rel_t
.root
.map_async(async move |child| {
self.transform_rel(child).await.map(|transformed| {
let mut state = state_ref.lock().unwrap();
transformed.add_to(&mut state.0);
state.1 = max(state.1, transformed.priority);
transformed.root
})
})
.await
.into_result()?,
);
let state_ = state.lock().unwrap();
let transformed = Transformed {
root,
cost: state_.0.clone(),
priority: state_.1,
};
debug!("from level below, got best relation tree {:?}", transformed);
Ok(transformed)
}
}
.boxed()
}
}
pub struct ExprTransformer<'a> {
bindings: &'a Context<&'a PolicyBinding>,
audience: &'a BlockType,
}
impl<'a> ExprTransformer<'a> {
fn new(bindings: &'a Context<&'a PolicyBinding>, audience: &'a BlockType) -> Self {
Self { bindings, audience }
}
fn transform_expr(&self, expr_t: &ExprT) -> Result<Transformed<ExprT>, Error> {
let mut proposed = Vec::new();
for (key, binding) in self.bindings.iter() {
let priority = binding.priority;
for policy in binding.policies.iter() {
match policy.transform_expr(expr_t) {
Ok(Costly { mut root, cost }) => {
root.board
.as_mut()
.map(|board| {
board.audience.insert(self.audience.clone());
})
.map_err(|e| Error::Validate(e.clone()))?;
let transformed = Transformed::new(root, key, cost, priority);
proposed.push(transformed);
}
Err(Error::NoMatch) => {}
Err(err) => return Err(err),
}
}
}
if let Some(best) = Transformed::best_candidate(proposed) {
// select the best strategy
Ok(best)
} else {
// no match so far, let's try deeper
if expr_t.is_leaf() {
return Err(Error::NoMatch);
}
let mut cost = HashMap::new();
let mut priority = 0;
let root = ExprT::from(
expr_t
.root
.map(&mut |child| {
self.transform_expr(child).map(|transformed| {
transformed.add_to(&mut cost);
priority = max(priority, transformed.priority);
transformed.root
})
})
.into_result()?,
);
Ok(Transformed {
root,
cost,
priority,
})
}
}
}
#[derive(Debug, Clone)]
pub struct Transformed<T> {
pub root: T,
pub cost: HashMap<ContextKey, f64>,
pub priority: u64,
}
impl<T> Transformed<T> {
pub fn default(root: T) -> Self {
Self {
root,
cost: HashMap::new(),
priority: 0,
}
}
fn new(root: T, binding_key: &ContextKey, cost: f64, priority: u64) -> Self {
Self {
root,
cost: {
let mut cost_ = HashMap::new();
cost_.insert(binding_key.clone(), cost);
cost_
},
priority,
}
}
pub fn into_inner(self) -> T {
self.root
}
fn best_candidate<I>(iter: I) -> Option<Self>
where
I: IntoIterator<Item = Self>,
{
let proposed: Vec<_> = iter.into_iter().collect();
let highest = proposed
.iter()
.max_by(|l, r| l.priority.cmp(&r.priority))
.map(|highest| highest.priority)?;
let candidates = proposed
.into_iter()
.filter(|t| t.priority == highest)
.collect::<Vec<_>>();
let best = candidates
.into_iter()
.min_by(|l, r| l.total_cost().partial_cmp(&r.total_cost()).unwrap())
.unwrap();
Some(best)
}
fn total_cost(&self) -> f64 {
self.cost.values().sum()
}
fn add_to(&self, costs: &mut HashMap<ContextKey, f64>) {
for (key, cost) in self.cost.iter() {
*costs.entry(key.clone()).or_default() += cost;
}
}
}
#[cfg(test)]
pub mod tests {
use super::*;
use crate::node::state::tests::read_manifest;
use crate::node::tests::mk_node;
use crate::opt::validate::Validator;
use tokio::runtime::Runtime;
use parallax_api::block_type;
fn test_transform_for(query: &str) -> Transformed<RelT> {
let random_scope = uuid::Uuid::new_v4().to_simple().to_string();
let access = Arc::new(mk_node(&random_scope));
for resource in read_manifest().into_iter() {
access.create_resource(resource).unwrap();
}
Runtime::new().unwrap().block_on(async {
let ctx = access.context().await.unwrap();
let validator = Validator::new(&ctx);
let policies = access.policies_for_group("wheel").unwrap();
let rel_t = validator.validate_str(query).unwrap();
let audience = block_type!("resource"."group"."wheel");
let transformer = RelTransformer::new(&policies, &audience, &access);
let rel_t = transformer
.transform_rel(&rel_t)
.await
.or_else(|error| match error {
super::Error::NoMatch => Ok(Transformed::default(rel_t)),
super::Error::Validate(err) => Err(err),
})
.unwrap();
rel_t
})
}
#[test]
fn transform_blocked() {
let rel_t = test_transform_for(
"\
SELECT person_id FROM patient_data.person
",
)
.into_inner();
let table_meta = rel_t.board.unwrap();
assert!(table_meta.audience.is_empty())
}
#[test]
fn transform_whitelist() {
let rel_t = test_transform_for(
"\
SELECT vocabulary_id FROM patient_data.vocabulary
",
)
.into_inner();
let table_meta = rel_t.board.unwrap();
assert!(table_meta
.audience
.contains(&block_type!("resource"."group"."wheel")))
}
use crate::opt::expr::As;
#[test]
fn transform_obfuscation() {
let rel_t = test_transform_for(
"\
SELECT address_1 FROM patient_data.location
",
)
.into_inner();
let table_meta = rel_t.board.unwrap();
assert!(table_meta
.audience
.contains(&block_type!("resource"."group"."wheel")));
match rel_t.root {
Rel::Projection(Projection { attributes, .. }) => {
match attributes[0]
.as_ref()
.map_owned(&mut |child| child.as_ref())
{
Expr::As(As {
expr: Expr::Literal(Literal(LiteralValue::Null)),
alias,
}) => assert_eq!(alias, "address_1".to_string()),
_ => panic!("`review_id` was not obfuscated"),
}
}
_ => unreachable!(),
}
}
#[test]
fn transform_hash() {
let rel_t = test_transform_for(
"\
SELECT care_site_name FROM patient_data.care_site
",
)
.into_inner();
let table_meta = rel_t.board.unwrap();
assert!(table_meta
.audience
.contains(&block_type!("resource"."group"."wheel")));
match rel_t.root {
Rel::Projection(Projection { attributes, .. }) => {
match attributes[0]
.as_ref()
.map_owned(&mut |child| child.as_ref())
{
Expr::As(As { expr, .. }) => match expr {
Expr::Hash(..) => {}
_ => panic!("`care_site_name` was not hashed"),
},
_ => panic!("`care_site_name` was not hashed"),
}
}
_ => unreachable!(),
}
}
#[test]
fn transform_diff_priv() {
let rel_t = test_transform_for(
"\
SELECT gender_concept_id, COUNT(person_id) \
FROM patient_data.person \
GROUP BY gender_concept_id
",
);
// For now this is enough in order to check that diff priv was triggered
// as it is the only policy with an associated cost
assert!(*rel_t.cost.values().next().unwrap() > 0f64);
}
#[test]
fn transform_aggregation() {
let rel_t = test_transform_for(
"\
SELECT state, COUNT(DISTINCT location_id) \
FROM patient_data.location \
GROUP BY state \
",
);
let table_meta = rel_t.root.board.unwrap();
assert!(table_meta
.audience
.contains(&block_type!("resource"."group"."wheel")));
}
}
|
// NewFeedsManagerResource constructs a new FeedsManagerResource.
func NewFeedsManagerResource(ms feeds.FeedsManager) *FeedsManagerResource {
return &FeedsManagerResource{
JAID: NewJAIDInt64(ms.ID),
Name: ms.Name,
URI: ms.URI,
PublicKey: ms.PublicKey,
JobTypes: ms.JobTypes,
Network: ms.Network,
CreatedAt: ms.CreatedAt,
}
} |
def domains_dns_setCustom(self, domain, host_records):
    """Set custom DNS host records for a domain via the namecheap.domains.dns.setCustom API call."""
    extra_payload = host_records
    sld, tld = domain.split(".")  # assumes a simple "sld.tld" domain name
    extra_payload['SLD'] = sld
    extra_payload['TLD'] = tld
    self._call("namecheap.domains.dns.setCustom", extra_payload)
Lindzen Illusion #3 and Christy Crock #5: Opposing Climate Solutions
Posted on 3 May 2011 by dana1981
Recently, "skeptic" climate scientist John Christy testified before U.S. Congress, and both Christy and fellow "skeptic" Richard Lindzen have been interviewed on an Australian radio talk show regarding the country's proposed carbon tax.
In each situation, these two scientists spoke out against efforts to address climate change by reducing carbon dioxide (CO2) and other greenhouse gas (GHG) emissions.
Tragedy of the Commons Once Again
Their main argument seems to be becoming a favorite amongst "skeptics": "CO2 limits will make little difference." In his radio interview, Christy applied the argument to California (which is attempting to implement a carbon cap and trade system), Australia (with the aforementioned proposed carbon tax), and in his congressional testimony, to the USA:
"We’re talking about less than a hundredth of a degree [if California cuts emissions by 26% by 2016]. It’s just so miniscule; I mean the global temperature changes by more than that from day to day. So this is what we call in Alabama “spitting in the ocean”." "On the climate front, [Australia cutting its emissions by 5% by 2020] will be imperceptible or minuscule compared to what the rest of the world is doing." "you're looking at most at a tenth of a degree [reduction in global temperature] after 100 years [if USA imposes CO2 limits]"
Lindzen attempted to apply the argument globally:
"The evidence is pretty good that even if everyone [cut emissions] in the whole world it wouldn't make a lot of difference."
We have previously addressed this argument as applied to Australia by Chris Monckton and David Evans, and applied to the USA by David Montgomery. However, given that the USA is the largest historical CO2 emitter, the second-largest current emitter, and high on the list in terms of per capita emissions, it may be worthwhile to evaluate these claims. Let's run the numbers using CO2 emissions data from the U.S. Energy Information Administration.
Current global CO2 emissions total approximately 30 billion tons (Gt) per year, with the USA contributing approximately 20% (5.8 Gt per year). US emissions have risen approximately 15% since 1990, so let's assume in a business as usual scenario, they will continue to rise at that rate. In this case, total US CO2 emissions between now and 2050 will total approximately 275 Gt. The IPCC projects that in business as usual, global CO2 emissions will total approximately 2,200 Gt over that period.
If the USA were to follow through with proposals to reduce CO2 emissions 83% below 2005 levels by 2050, the result would roughly cut the country's emissions over that period in half, to 140 Gt, reducing global emissions to approximately 2,060 Gt.
Approximately 55% of human CO2 emissions currently remain airborne (the remainder is absorbed by carbon sinks), and each 7.8 Gt CO2 emitted corresponds to roughly 1 part per million by volume (ppmv) increase in atmospheric CO2 concentration. Thus the US cuts would reduce the atmospheric CO2 concentration to approximately 540 ppmv compared to 550 ppmv in business as usual in 2050.
Assuming the IPCC most likely climate sensitivity value of 3°C for doubled CO2 (incorporating only fast feedbacks - remember, long-term sensitivity is even higher) is correct, these US emissions cuts by themselves would reduce the amount of equilibrium warming by 0.08°C, from roughly 2.9 to 2.8°C surface warming above pre-industrial levels. And of course Australia and California's cuts would have even less effect on global temperatures, as they have smaller populations and thus lower total emissions. So Lindzen and Christy have a point here, right?
Well, no. In particular, Lindzen claims that global emissions cuts "wouldn't make a lot of difference." But let's say international negotiations succeeded in convincing countries all around the world to reduce global CO2 emissions by 50% below 1990 levels by 2050. Now suddenly instead of 2,200 Gt CO2 emitted in the next four decades, it's only about 820 Gt. Now instead of 550 ppmv in 2050, we're looking at about 450 ppmv.
Instead of committing ourselves to 2.9°C warming above pre-industrial levels as in business as usual, we're only committed to 2°C, which keeps us right at the cusp of the global warming "danger limit." Plus rather than blowing past the danger limit with CO2 levels continuing to rise rapidly, we'll have set up the technologies and infrastructure necessary to continue reducing emissions to safe levels. Remember, the last time atmospheric CO2 was at current levels, global temperatures were 3 to 4°C warmer than pre-industrial, and sea levels were around 25 meters higher than current sea level. So we really should aim to eventually stabilize atmospheric CO2 at no higher than 350 ppmv, and the more CO2 we emit now, the more difficult that will be. We're currently adding another 2 ppmv CO2 to the atmosphere per year, continually moving further away from that 350 ppmv target.
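For readers who want to check these numbers themselves, here is a rough back-of-the-envelope sketch in Python. It assumes a present-day concentration of roughly 390 ppmv and a pre-industrial level of 280 ppmv (neither stated explicitly above), and otherwise uses only the figures already quoted: a 55% airborne fraction, 7.8 Gt CO2 per ppmv, and 3°C of equilibrium warming per doubling of CO2.
from math import log

PREINDUSTRIAL_PPM = 280.0   # assumed pre-industrial CO2 concentration
CURRENT_PPM = 390.0         # assumed present-day concentration (approximate)
AIRBORNE_FRACTION = 0.55    # share of emitted CO2 that stays in the atmosphere
GT_PER_PPM = 7.8            # Gt of CO2 per 1 ppmv rise in concentration
SENSITIVITY = 3.0           # deg C of equilibrium warming per doubling of CO2

def ppm_in_2050(cumulative_emissions_gt):
    """Approximate 2050 concentration given cumulative emissions (Gt CO2) to 2050."""
    return CURRENT_PPM + AIRBORNE_FRACTION * cumulative_emissions_gt / GT_PER_PPM

def equilibrium_warming(ppm):
    """Equilibrium warming above pre-industrial for a given concentration."""
    return SENSITIVITY * log(ppm / PREINDUSTRIAL_PPM) / log(2)

for label, emissions_gt in [("business as usual", 2200),
                            ("US cuts only", 2060),
                            ("global 50% cuts", 820)]:
    ppm = ppm_in_2050(emissions_gt)
    print(f"{label}: ~{ppm:.0f} ppmv in 2050, ~{equilibrium_warming(ppm):.1f} C equilibrium warming")
Running this reproduces the contrast drawn above: the US-only cuts shave off less than a tenth of a degree of equilibrium warming, while coordinated global cuts keep it near 2°C.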
So clearly Lindzen is wrong that global emissions cuts won't make a difference. And the only way we're going to achieve large global emissions cuts is if major emitters like the USA and Australia lead the way in reducing their emissions. And the USA is more likely to proceed if states like California demonstrate that CO2 limits can be implemented successfully. Thus although these individual cuts won't have a significant direct impact on global temperatures, they can have a major indirect effect by triggering more widespread emissions cuts.
Costs vs. Benefits
In his Australian radio interview, Lindzen also claimed that the costs of CO2 limits would outweigh the benefits.
"[CO2 limits are] a heavy cost for no benefit, and it's no benefit for you, no benefit for your children, no benefit for your grandchildren, no benefit for your great-great-great-great-grandchildren. I mean, what's the point of that?"
Christy made a similar argument both in his US Congressional testimony and his Australian radio interview.
"this issue has policy implications that may potentially raise the price of energy a lot, and thus essentially the price of everything else." "I would think a couple of things will happen [if Australia cuts its emissions by 5% by 2020]. One is that your energy prices will rise and your economy then will begin to turn downward. And you will provide opportunities for other nations to take up the slack that Australia used to provide the world."
It's true that carbon limits would likely cause a modest rise in the market price of energy. However, the funds from selling carbon emissions permits could be used to offset this price increase through improved energy efficiency and other measures. Economic studies showed that the proposed CO2 limits in the USA would have virtually no impact on average electricity bills, for example. It's important to distinguish between prices and bills – an increase in the former doesn't necessarily cause an increase in the latter, if other measures are taken to prevent bills from rising.
Moreover, the true cost of coal energy, on which Australia and the USA rely heavily, is approximately triple the market price, which does not account for factors like impacts on public or environmental health. Thus a carbon price more accurately reflects this true cost in the market price, and also aids in the transition to other energy sources whose true cost is actually lower than fossil fuels. So although market prices may rise, the total cost paid by Australians, Americans, etc. will actually fall.
This is one of the reasons that contrary to Lindzen's claims, the benefits of carbon limits outweigh the costs several times over. This is something that economic studies and economic experts consistently agree about. You could even call it an economic climate consensus. A recent survey of 144 of the world's top economists with expertise on climate change found that 88% agreed that the benefits of carbon pricing outweigh the costs, and over 94% agreed the US should reduce its GHG emissions if other major emitters also commit to reductions (which many already have, particularly in Europe):
NYU IPI survey results when asked under what circumstances the USA should reduce its emissions
Stick to What You Know
In their comments dissuading Australian and American efforts to address the threats posed by global warming and climate change, Lindzen and Christy made a number of erroneous and false statements. Perhaps these climate scientists should leave the economic arguments to, you know, economists. And they should certainly stop promoting the Tragedy of the Commons. If the USA can argue that its emissions cuts won't make a difference, then every country can make the argument. And if everyone makes it, we'll fail to achieve even modest emissions cuts, and as a result we will all doom ourselves to increasingly dangerous global warming and climate change.
A good scientist should not encourage us to play Russian Roulette with the climate, all the while adding more and more bullets to the chamber. |
// This implementation comes from the pseudocode in Andrew J. Hanson and Hui Ma,
// Parallel Transport Approach to Curve Framing (1995)
inline frame3f parallel_trans_frame(const vec3f &pos, const vec3f &tangent, const frame3f &pframe)
{
auto b = cross(tangent, pframe.z);
if (length(b) < 0.01f)
return make_frame_fromzx(pos, pframe.z, pframe.x);
b = normalize(b);
auto t = acosf(dot(tangent, pframe.z));
return make_frame_fromzx(pos, tangent, transform_point(rotation_frame(b, t), pframe.x));
} |
Not to pick on the Sunshine State, where I was reared and the 2000 election was sorta decided, but it’s always been the crazy-news nexus of the universe…and that was before last year’s elections, which handed legislative supermajorities and every state cabinet office over to the GOP—including the governorship, to tea party-friendly (and common-sense-challenged) Gov. Rick Scott. In recent weeks, we’ve detailed the hilarity that ensues when tea partiers decide to dismantle the protections of government that had been assembled by Democrats and Republicans alike in this, the fourth-largest state in the union. Included in the fun:
But wait! There’s more! Here’s a roundup of the latest Tallahassee terror from just the past three days. If we have time, this will probably become a regular feature. There should be no shortage of down-South silliness, at least until the 2012 elections.
1) The anti-government, anti-tax GOP has finally found a solution to its budget woes: Hike up the cost of state education! College tuition will likely rise 15 percent next year (again), the maximum permitted by law. But reducing access to public colleges for the worst-off students may not be enough to kill off the state’s deficit woes, so other steps will need to be taken, like:
2) Killing off tenure for college professors! It took all of a few minutes Tuesday for a state House committee to approve a measure that would force all professors into one-year renewable contracts and leave them vulnerable to firing for “poor performance,” however that’s defined. (We guess it has something to do with how much pro-union email you forward.)
3) What’s another quick way to “save” government money, steer contracts to friends, and mess with a Democratic state stronghold? Privatize the jails—especially in blue counties! On a straight 15-8 party line, the House appropriations committee approved language to turn all of the Broward and Miami-Dade County jails over to private firms. Hopefully, they can be filled with freshly convicted felons before the next election, amirite?
4) GOP senators are also moving forward on a bill to make evictions of tenants easier in Miami-Dade, which is ground zero for the mortgage bust…as well as a Democratic bastion where it’s much harder to vote when you don’t have a place of residence. (Maybe they can ship transients to those newly privatized jails.) Even law enforcement was against this one. But hey, they’re unionized public employees! Who can trust ’em?
5) God, it’s so hard to become a barber in Florida, what with needing a license that costs money! Thank goodness GOPers this week are speeding a “Deregulation of Professions” bill, which would get rid of the state’s agencies for licensing and regulating
“yacht & ship brokers, auctioneers, talent agencies, athlete agents, persons practicing hair braiding, hair wrapping, or body wrapping, interior designers, professional fundraising consultants & solicitors, water vending machines & operators, health studios, ballroom dance studios, commercial telephone sellers & salespersons, movers & moving brokers, certain outdoor theaters, certain business opportunities, motor vehicle repair shops, sellers of travel, contracts with sales representatives involving commissions, & television picture tubes…”
How much will this triumph of deregulation save state taxpayers? It will save them negative $6 million, and negative 100 jobs. Which is to say, it will cost $6 million and 100 jobs extra over not deregulating (also known as, you know, regulating). Buy with confidence, Floridians!
6) Also worth not doing, according to GOPers: requiring tire sellers to tell you how old the tires are when you buy them. This bill was sponsored by a Republican senator, who lost sight in one eye due to an accident caused by a blowout on tires that were too old. His party overruled him.
7) Florida Republicans would like to pass a “fetal pain abortion bill” soon. Because, you know, everybody’s doing it.
8) Speaking of public employees, have we cut $1 billion from their pay and benefits yet? Are they still getting raises to match the ever-rocketing cost of living in Florida? Yeah, let’s get on that, shall we?
9) Also, GOPers are not going to kill state workers’ rights to bargain collectively, since they don’t really have that right in Florida anyway. But what they can do is make it illegal to have workers’ union dues directly debited from their state pay. The sponsor of this bill says tons of union members have called and written him to support the plan. Too bad they couldn’t find a record of a single supportive union member when the St. Pete Times requested said records.
10) Finally, Rick Scott’s got problems. First, the good news: His approval rating has remained steady since he took office…at 32 percent. The bad news: His disapproval rating has skyrocketed to 55 percent, which “makes him the least popular currently serving governor,” according to Public Policy Polling (pdf).
Also, one of his showcase plans is in serious danger of imploding, again: As his first act in office, this prophet of deregulation signed an executive order to freeze all new rulemaking in every state government office until his administration could review and approve each one. Too bad he never announced how the myriad rules would be reviewed or by whom…but then, that would be creating a new rule for rules, wouldn’t it! Damned government waste! In any case, Scott’s rulemaking freeze is now being challenged in court: He’s being sued by a blind woman in Miami who lives on food stamps, who says her ability to reapply for food stamps online would have been improved by one of the rules Scott’s office is sitting on. The woman is being represented by Sandy D’Alemberte, an ex-president and law professor at Florida State University whose penchant for bowties, suspenders, and playing the simple Southern lawyer could spell even more trouble for the beleaguered governor. Hell, he could even see his popularity slide further. Wait…no he couldn’t. No, really, it’s statistically unlikely to happen.
But if it does, rest assured, dear reader, it will be in the next Sunshine State roundup! |
import AjaxCacheController from '../../../../shared/modules/AjaxCache/AjaxCacheController';
import CacheInvalidationRulesVO from '../../../../shared/modules/AjaxCache/vos/CacheInvalidationRulesVO';
import APIControllerWrapper from '../../../../shared/modules/API/APIControllerWrapper';
import IAPIController from '../../../../shared/modules/API/interfaces/IAPIController';
import IAPIParamTranslator from '../../../../shared/modules/API/interfaces/IAPIParamTranslator';
import APIDefinition from '../../../../shared/modules/API/vos/APIDefinition';
import EnvHandler from '../../../../shared/tools/EnvHandler';
import AjaxCacheClientController from '../AjaxCache/AjaxCacheClientController';
export default class ClientAPIController implements IAPIController {
public static getInstance(): ClientAPIController {
if (!ClientAPIController.instance) {
ClientAPIController.instance = new ClientAPIController();
}
return ClientAPIController.instance;
}
private static instance: ClientAPIController = null;
public get_shared_api_handler<T extends IAPIParamTranslator<T>, U>(
api_name: string,
sanitize_params: (...params) => any[] = null,
precondition: (...params) => boolean = null,
precondition_default_value: any = null,
registered_apis: { [api_name: string]: APIDefinition<any, any> } = {},
sanitize_result: (res: any, ...params) => any = null): (...params) => Promise<U> {
return async (...params) => {
let apiDefinition: APIDefinition<T, U> = registered_apis[api_name];
if (!apiDefinition) {
throw new Error('API client undefined:' + api_name + ':');
}
if (sanitize_params) {
params = sanitize_params(...params);
}
if (precondition && !precondition(...params)) {
if (sanitize_result) {
return sanitize_result(precondition_default_value, ...params);
}
return precondition_default_value;
}
let res = await this.handleAPI(apiDefinition, ...params);
if (sanitize_result) {
res = sanitize_result(res, ...params);
}
return res;
};
}
private async handleAPI<T extends IAPIParamTranslator<T>, U>(apiDefinition: APIDefinition<T, U>, ...api_params): Promise<U> {
let translated_param: IAPIParamTranslator<T> = APIControllerWrapper.getInstance().translate_param(apiDefinition, ...api_params);
let api_name = apiDefinition.api_name;
let API_TYPES_IDS_involved = apiDefinition.API_TYPES_IDS_involved;
if ((API_TYPES_IDS_involved != CacheInvalidationRulesVO.ALWAYS_FORCE_INVALIDATION_API_TYPES_INVOLVED) && !Array.isArray(API_TYPES_IDS_involved)) {
API_TYPES_IDS_involved = API_TYPES_IDS_involved(translated_param);
}
let api_res = null;
switch (apiDefinition.api_type) {
case APIDefinition.API_TYPE_GET:
let url_param: string =
(translated_param && translated_param.translateToURL) ? translated_param.translateToURL() :
(translated_param ? translated_param.toString() : "");
api_res = await AjaxCacheClientController.getInstance().get(
apiDefinition,
(APIControllerWrapper.BASE_API_URL + api_name + "/" + url_param).toLowerCase(),
API_TYPES_IDS_involved,
(!EnvHandler.getInstance().MSGPCK) ? 'application/json; charset=utf-8' : AjaxCacheController.MSGPACK_REQUEST_TYPE) as U;
break;
case APIDefinition.API_TYPE_POST_FOR_GET:
api_res = await AjaxCacheClientController.getInstance().get(
apiDefinition,
(APIControllerWrapper.BASE_API_URL + api_name).toLowerCase(),
API_TYPES_IDS_involved,
((typeof translated_param != 'undefined') && (translated_param != null)) ? ((!EnvHandler.getInstance().MSGPCK) ? JSON.stringify(APIControllerWrapper.getInstance().try_translate_vos_to_api(translated_param)) : APIControllerWrapper.getInstance().try_translate_vos_to_api(translated_param)) : null,
null,
(!EnvHandler.getInstance().MSGPCK) ? 'application/json; charset=utf-8' : AjaxCacheController.MSGPACK_REQUEST_TYPE,
null,
null,
true) as U;
break;
case APIDefinition.API_TYPE_POST:
if (apiDefinition.api_return_type == APIDefinition.API_RETURN_TYPE_FILE) {
let filePath: string = await AjaxCacheClientController.getInstance().post(
apiDefinition,
(APIControllerWrapper.BASE_API_URL + api_name).toLowerCase(),
API_TYPES_IDS_involved,
((typeof translated_param != 'undefined') && (translated_param != null)) ? ((!EnvHandler.getInstance().MSGPCK) ? JSON.stringify(APIControllerWrapper.getInstance().try_translate_vos_to_api(translated_param)) : APIControllerWrapper.getInstance().try_translate_vos_to_api(translated_param)) : null,
null,
(!EnvHandler.getInstance().MSGPCK) ? 'application/json; charset=utf-8' : AjaxCacheController.MSGPACK_REQUEST_TYPE) as string;
const { default: $ } = await import(/* webpackChunkName: "jquery" */ 'jquery');
let iframe = $('<iframe style="display:none" src="' + filePath + '"></iframe>');
$('body').append(iframe);
return;
} else {
api_res = await AjaxCacheClientController.getInstance().post(
apiDefinition,
(APIControllerWrapper.BASE_API_URL + api_name).toLowerCase(),
API_TYPES_IDS_involved,
((typeof translated_param != 'undefined') && (translated_param != null)) ? ((!EnvHandler.getInstance().MSGPCK) ? JSON.stringify(APIControllerWrapper.getInstance().try_translate_vos_to_api(translated_param)) : APIControllerWrapper.getInstance().try_translate_vos_to_api(translated_param)) : null,
null,
(!EnvHandler.getInstance().MSGPCK) ? 'application/json; charset=utf-8' : AjaxCacheController.MSGPACK_REQUEST_TYPE) as U;
}
}
// On tente de traduire si on reconnait un type de vo
api_res = APIControllerWrapper.getInstance().try_translate_vo_from_api(api_res);
return api_res;
}
} |
/*
Note: The Barricade rom is using a resolution of 32x24 which suggests slightly
different hardware from HitMe (40x19) however the screenshot on the arcade
flyer is using a 40x19 resolution. So is this a different version of
Barricade or is the resolution set by a dip switch?
*/
void hitme_state::barricad(machine_config &config)
{
hitme(config);
m_screen->set_size(32*8, 24*8);
m_screen->set_visarea(0*8, 32*8-1, 0*8, 24*8-1);
m_screen->set_screen_update(FUNC(hitme_state::screen_update_barricad));
m_gfxdecode->set_info(gfx_barricad);
MCFG_VIDEO_START_OVERRIDE(hitme_state,barricad)
} |
import { memo } from 'react';
import { Form, Field } from 'react-final-form';
import { SubscriptionField } from 'features/Auth/SubscriptionField/SubscriptionField';
import { CheckboxesList, Dropdown, Review, TimePicker, PriceSlider } from 'shared/view/components';
import {
checkboxesListData,
expandableCheckboxesListData,
richCheckboxesListData,
} from 'shared/view/components/CheckboxesList/CheckboxesList.fixture';
import {
guestsGroups,
guestsItems,
amenitiesItems,
} from 'shared/view/components/Dropdown/Dropdown.fixture';
import {
ArrowButton,
Benefits,
BulletList,
Button,
Expander,
Input,
LikeButton,
RadioButton,
StarRating,
TextButton,
Toggle,
UILogo,
} from 'shared/view/elements';
import { emailValidator, dateValidator, dateFormatMask } from 'utils/validators';
import * as S from './FormElementsPage.styles';
const FormElementsPage = memo(() => {
const handleFormSubmit = (): unknown => ({});
return (
<S.FormElementsPage>
<S.Logo>
<UILogo />
</S.Logo>
<Form
onSubmit={handleFormSubmit}
initialValues={{ gender: 'female', 'toggle-on': true, range: [5000, 10000] }}
render={(values) => (
<S.Content>
<div>
<S.InputWrapper>
<Field
name="email"
type="text"
render={({ input }) => (
<Input
{...input}
placeholder="Email"
label="text field"
validators={[emailValidator]}
/>
)}
/>
</S.InputWrapper>
<S.InputWrapper>
<Field
name="name"
type="text"
render={({ input }) => (
<Input {...input} placeholder="This is pretty awesome" label="text field" />
)}
/>
</S.InputWrapper>
<S.Title>Dropdown</S.Title>
<S.DropdownWrapper>
<Dropdown
placeholder="Сколько гостей"
enableControls={false}
name="amenities"
items={amenitiesItems}
/>
</S.DropdownWrapper>
<S.InputWrapper>
<Field
name="date"
type="text"
render={({ input }) => (
<Input
{...input}
placeholder="ДД.ММ.ГГГГ"
validators={[dateValidator]}
mask={dateFormatMask}
label="Masked text field"
/>
)}
/>
</S.InputWrapper>
<S.TimePickerWrapper>
<TimePicker
type="double"
name="elements-date"
dateFromLabelText="date dropdown"
dateToLabelText="date dropdown"
/>
</S.TimePickerWrapper>
<S.TimePickerWrapper>
<TimePicker
type="single"
name="elements-date"
dateFromLabelText="filter date dropdown"
/>
</S.TimePickerWrapper>
<S.Title>subscription text field</S.Title>
<S.SubscriptionWrapper>
<SubscriptionField placeholder="Email" />
</S.SubscriptionWrapper>
</div>
<div>
<S.Title>checkbox buttons</S.Title>
<S.CheckboxWrapper>
<CheckboxesList roomOptions={checkboxesListData} />
</S.CheckboxWrapper>
<S.Title>radio buttons</S.Title>
<S.RadioWrapper>
<RadioButton label="Мужчина" name="gender" value="male" />
<RadioButton label="Женщина" name="gender" value="female" />
</S.RadioWrapper>
<S.Title>toggle</S.Title>
<S.ToggleWrapper>
<Toggle name="toggle-on" label="Получать спецпредложения" />
</S.ToggleWrapper>
<S.ToggleWrapper>
<Toggle name="toggle-off" label="Получать спецпредложения" />
</S.ToggleWrapper>
<S.Title>like button</S.Title>
<S.LikeButtonWrapper>
<LikeButton count={2} />
</S.LikeButtonWrapper>
<S.Title>rate button</S.Title>
<S.StarRatingWrapper>
<StarRating rating={4} />
<StarRating rating={5} />
</S.StarRatingWrapper>
</div>
<div>
<S.SliderWrapper>
<PriceSlider
value={values.initialValues.range}
title="range slider"
name="range"
showValue
/>
</S.SliderWrapper>
<S.Title>buttons</S.Title>
<S.ButtonWrapper>
<Button type="button" isFilled>
click me
</Button>
</S.ButtonWrapper>
<S.ButtonWrapper>
<Button href="/mock-to-click">click me</Button>
</S.ButtonWrapper>
<S.TextButtonWrapper>
<TextButton href="/mock-to-click">Click me</TextButton>
</S.TextButtonWrapper>
<S.TextButtonWrapper>
<TextButton isSecondary>Click me</TextButton>
</S.TextButtonWrapper>
<S.ArrowButtonWrapper>
<ArrowButton href="/mock-to-click">Перейти к оплате</ArrowButton>
</S.ArrowButtonWrapper>
</div>
<div>
<S.Title>Dropdown</S.Title>
<S.DropdownWrapper>
<Dropdown
placeholder="Сколько гостей"
enableControls={false}
name="amenities"
items={amenitiesItems}
/>
</S.DropdownWrapper>
<S.Title>Dropdown</S.Title>
<S.DropdownWrapper>
<Dropdown
placeholder="Сколько гостей"
name="guests"
enableControls
groups={guestsGroups}
items={guestsItems}
/>
</S.DropdownWrapper>
<S.ExpandableCheckboxWrapper>
<Expander title="expandable checkbox list" isDefaultOpen={false}>
<CheckboxesList roomOptions={expandableCheckboxesListData} />
</Expander>
</S.ExpandableCheckboxWrapper>
<S.ExpandableCheckboxWrapper>
<Expander title="expandable checkbox list" isDefaultOpen>
<CheckboxesList roomOptions={expandableCheckboxesListData} />
</Expander>
</S.ExpandableCheckboxWrapper>
</div>
<div>
<S.Title>Dropdown</S.Title>
<S.DropdownWrapper>
<Dropdown
placeholder="Сколько гостей"
name="guests"
enableControls
groups={guestsGroups}
items={guestsItems}
/>
</S.DropdownWrapper>
<S.Title>rich checkbox</S.Title>
<S.RichCheckboxWrapper>
<CheckboxesList roomOptions={richCheckboxesListData} />
</S.RichCheckboxWrapper>
</div>
<div>
<S.Title>Dropdown</S.Title>
<S.DropdownWrapper>
<Dropdown
placeholder="Сколько гостей"
name="guests"
enableControls
groups={guestsGroups}
items={guestsItems}
/>
</S.DropdownWrapper>
<S.Title>bullet list</S.Title>
<S.BulletListWrapper>
<BulletList
items={[
'Нельзя с питомцами',
'Без вечеринок и мероприятий',
'Время прибытия — после 13:00, а выезд до 12:00',
]}
/>
</S.BulletListWrapper>
</div>
<S.BenefitsWrapper>
<Benefits
items={[
{
icon: 'insert_emoticon',
term: 'Комфорт',
definition: 'Шумопоглощающие стены',
},
{
icon: 'location_city',
term: 'Удобство',
definition: 'Окно в каждой из спален',
},
]}
/>
</S.BenefitsWrapper>
<S.ReviewsWrapper>
<Review
avatarUrl="user.jpg"
userName="Мурад Сарафанов"
date={new Date('2020-09-27 12:03:14')}
text="Великолепный матрас на кровати в основной спальне! А пуфик вообще потрясающий. И стены, действительно, шумоподавляющие. Выкрикивал комплименты повару — никто не жаловался из соседей."
likesCount={12}
/>
</S.ReviewsWrapper>
</S.Content>
)}
/>
</S.FormElementsPage>
);
});
export { FormElementsPage };
|
def menu(self):
background_image = pygame.image.load("images/menu.png").convert()
done = False
pygame.display.set_caption(constants.WINDOW_TITLE)
while not done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
return "Thanks for playing!"
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_1: done = self.instructions()
elif event.key == pygame.K_2: done = self.play()
elif event.key == pygame.K_3: done = True
self.screen.blit(background_image, [0, 0])
pygame.display.flip()
self.clock.tick(constants.REFRESH_RATE)
pygame.quit() |
/**
* Write result delimiter to output
* @param context
* @return number of bytes written
*/
static size_t writeDelimiter(scpi_t * context) {
if (context->output_count > 0) {
return writeData(context, ", ", 2);
} else {
return 0;
}
} |
/// Save resource. Return true if successful.
bool ObjectAnimation::Save(Serializer& dest) const
{
XMLFile xmlFile(context_);
XMLElement rootElem = xmlFile.CreateRoot("objectanimation");
if (!SaveXML(rootElem))
return false;
return xmlFile.Save(dest);
} |
// NewMockCreateUser creates a new mock instance.
func NewMockCreateUser(ctrl *gomock.Controller) *MockCreateUser {
mock := &MockCreateUser{ctrl: ctrl}
mock.recorder = &MockCreateUserMockRecorder{mock}
return mock
} |
// output adds the given paths as outputs to the task, which results in their
// contents being uploaded to the isolate server.
func (b *taskBuilder) output(paths ...string) {
for _, path := range paths {
if !In(path, b.Spec.Outputs) {
b.Spec.Outputs = append(b.Spec.Outputs, path)
}
}
} |
I think about email a lot because my company makes email software. I was talking with a friend yesterday and it got me thinking, just how much email is there, anyway?
So, I looked. A lot of email, it turns out. Like, a whole lot. In 2013, humanity sent about 150 billion emails each day. That’s 21 messages received per earthling per day, or 79 each if you only count actual email users.
Big Numbers
Big numbers are big. After a point, they become so immense that it’s impossible to have any real, intuitive understanding of what they mean.
This is a common problem in astronomy. For example, the sun is 93 million miles away. How far is that? I know how far a mile is because I know how long it takes to walk one. But a million miles? I’ve never consciously experienced a million of anything. Numbers like that are impossible to truly grasp, but we can try to visualize the distance with a little thought experiment:
At least one unsourced estimate says a person will travel 3,658,753 miles in their lifetime. My own estimate puts it closer to 6 million. Imagine that you lived your whole life moving in one direction, going from home to the office to the nightclub to home to the office, like those old cartoons where a character walks past the same lamp and table over and over again. You meet some cute person (“goin’ my way?”), and fall in love. Around age 30, you have a baby. Your child starts traveling with you, by car, train, and plane, a little closer to the sun each day.
If people reproduce about 1/3 of the way into their lives, it would take somewhere between 50 and 75 generations for your family to arrive at the surface of the sun. If you were born in the time of the Roman Empire, but with a modern life and an average 2014 commute, your descendants would be arriving about now.
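If you want to see where that generation count comes from, here is the arithmetic as a few lines of Python, using the 93-million-mile distance and the two lifetime-travel estimates mentioned above, plus the assumption that each generation sets off a third of the way into its parents' travels:
SUN_DISTANCE_MILES = 93_000_000

# Lifetime travel estimates quoted above: the unsourced ~3.66 million miles
# and my own ~6 million miles.
for label, lifetime_miles in [("low estimate", 3_658_753), ("high estimate", 6_000_000)]:
    # A new generation starts every third of a lifetime, and the family keeps
    # moving continuously, so each generation covers roughly a third of a lifetime's travel.
    generations = SUN_DISTANCE_MILES / (lifetime_miles / 3)
    print(f"{label}: about {generations:.0f} generations to reach the sun")
That works out to roughly 45 to 75 generations, which is where the range above comes from.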
So Much Email!
And, so it is with email. 150 billion is an impossibly large number, and that’s each day. If you printed one day of the world’s emails out, the stack of paper would be 10,000 miles high. A month of them would reach to the moon.
The average email is 75 kilobytes (except spam, which is about 5k and 69.6% of traffic), which means a day of email traffic is about 4 petabytes.
There’s 2.3 billion email users worldwide, and the average mailbox stores 8,024 messages. If we assume that most spam gets deleted, that puts the world’s total volume of stored email at 1,400 petabytes. Put another way: globally, we store about one year of email history. What can we compare this to in order to understand it?
Email vs the Web
The web is pretty big, but since it’s public we all have a sense of its vastness. It’s also hard to define: there’s sites like CNN.com, Wikipedia, or your favorite Taylor Swift forum, but what about your company’s intranet, your calendar app, or the configuration page for your fancy new bathroom scale? Also, with services like Gmail, the web includes most of the world’s email too.
A good definition of the web might be, “the stuff you can find by searching it.” Researchers call this the indexed web. Google says the web contains 30 trillion unique URLs. The average web page contains 96 of those objects, and is 1.6 megabytes in size. That puts the size of the indexed web at around 512 petabytes. So, email is about 3x the size of the web.
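If you'd like to sanity-check these estimates, here's a rough sketch of the arithmetic in Python. It uses only the figures quoted above and, for simplicity, treats a petabyte as 10^15 bytes:
KB, MB = 1_000, 1_000_000
PB = 10 ** 15  # treating a petabyte loosely as 10^15 bytes

EMAILS_PER_DAY = 150e9
SPAM_FRACTION = 0.696
AVG_EMAIL_KB, AVG_SPAM_KB = 75, 5

daily_traffic = (EMAILS_PER_DAY * SPAM_FRACTION * AVG_SPAM_KB * KB
                 + EMAILS_PER_DAY * (1 - SPAM_FRACTION) * AVG_EMAIL_KB * KB)
print(f"daily email traffic: ~{daily_traffic / PB:.0f} PB")          # ~4 PB

USERS, MSGS_PER_MAILBOX = 2.3e9, 8_024
stored_email = USERS * MSGS_PER_MAILBOX * AVG_EMAIL_KB * KB
print(f"stored email: ~{stored_email / PB:,.0f} PB")                 # ~1,400 PB

URLS, OBJECTS_PER_PAGE, AVG_PAGE_MB = 30e12, 96, 1.6
indexed_web = (URLS / OBJECTS_PER_PAGE) * AVG_PAGE_MB * MB
print(f"indexed web: ~{indexed_web / PB:.0f} PB")                    # ~500 PB
print(f"email is ~{stored_email / indexed_web:.1f}x the size of the web")
The three printed figures land close to the ~4 PB per day, ~1,400 PB stored and ~500 PB web estimates above.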
Remember that number from before? Email traffic is 69.6% spam. If there's 3x as much email as there is web, there's 3x as much spam as there is email!
But, since spam is small and it tends to be deleted after about a month, it makes up comparatively little of the total volume of stored email (around 16PB):
A previous version of this article segmented out spam, but didn’t account for its size or deletion rate. So, you may see a much more alarming version of this chart floating around. Sorry about that!
I’m always so impressed with how effective modern spam filtering infrastructure has become.
Bringing this back to the personal level, it’s suddenly no surprise that some days, I spend half my time online reading email. In a sense, that’s okay: the ability to contact exactly the right group of people whenever I want is like a new superpower, and I shouldn’t take it for granted. But, I also think that for something that accounts for more than half of the Internet, email is pretty janky. We don’t collectively spend nearly enough of our resources making it work better.
I co-founded a company, Threadable, to address exactly this problem. We’ve chosen a small part of this space, the email discussion group, and we’re trying to bring it to the standard of usability, utility, and beauty that the Web has enjoyed for years. Other companies are doing good work in this area too, like SendWithUs, with their responsive templates, Mailgun’s development and testing workflow, and services like Litmus and Inbox. Together, these companies seek to smooth out the complex landscape of clients and protocols, in much the same way that jQuery revolutionized JavaScript development. And, we’re beginning to see the next generation of mail-first apps, like Square’s charming email interface for Square Cash. Still, more is needed.
Predicting that email is dead, to be supplanted by (choose one of: Facebook, Twitter, Asana, Yammer, Skype) is like saying apps are killing websites. The comparison only makes sense when you compare a creaky old site to a slick new app. Modern email has a lot of advantages over more proprietary solutions, not the least of which is its ubiquity. When I look at the numbers above, it seems like fixing email is a way simpler project than replacing it wholesale.
And really, we’re unlikely to be successful in replacing email. New communication technologies are accretive. The Internet’s been huge for a decade, and we still have telephones. When the phone was introduced, telegrams became largely irrelevant, but they stuck around for another hundred years. Do you remember the last fax you sent? Because I sure do (thanks, Anthem Blue Cross!).
Email isn’t dead. It’s just sleeping. Put the coffee on, because it’s time to wake it up! |
// User wants a different trust store than the keystore
func TestUserSuppliedTLSSecretWithSeparateTrustStore(t *testing.T) {
tlsSecretName := "tls-cert-secret-from-user"
keystorePassKey := "some-password-key-thingy"
instance := buildTestSolrCloud()
instance.Spec.SolrSecurity = &solr.SolrSecurityOptions{AuthenticationType: solr.Basic}
instance.Spec.SolrTLS = createTLSOptions(tlsSecretName, keystorePassKey, false)
trustStoreSecretName := "custom-truststore-secret"
trustStoreFile := "truststore.p12"
instance.Spec.SolrTLS.TrustStoreSecret = &corev1.SecretKeySelector{
LocalObjectReference: corev1.LocalObjectReference{Name: trustStoreSecretName},
Key: trustStoreFile,
}
instance.Spec.SolrTLS.TrustStorePasswordSecret = &corev1.SecretKeySelector{
LocalObjectReference: corev1.LocalObjectReference{Name: trustStoreSecretName},
Key: "truststore-pass",
}
instance.Spec.SolrTLS.ClientAuth = solr.Need
verifyUserSuppliedTLSConfig(t, instance.Spec.SolrTLS, tlsSecretName, keystorePassKey, tlsSecretName)
verifyReconcileUserSuppliedTLS(t, instance, false, false)
} |
// SciTE - Scintilla based Text Editor
/** @file SciTE.h
** Define command IDs used within SciTE.
**/
// Copyright 1998-2003 by <NAME> <<EMAIL>>
// The License.txt file describes the conditions under which this software may be distributed.
#ifndef SCITE_H
#define SCITE_H
// Menu IDs.
// These are located 100 apart. No one will want more than 100 in each menu ;)
#define IDM_MRUFILE 1000
#define IDM_TOOLS 1100
#define IDM_BUFFER 1200
#define IDM_IMPORT 1300
#define IDM_LANGUAGE 1400
// File
#define IDM_NEW 101
#define IDM_OPEN 102
#define IDM_OPENSELECTED 103
#define IDM_REVERT 104
#define IDM_CLOSE 105
#define IDM_SAVE 106
#define IDM_SAVEAS 110
#define IDM_SAVEASHTML 111
#define IDM_SAVEASRTF 112
#define IDM_SAVEASPDF 113
#define IDM_FILER 114
#define IDM_SAVEASTEX 115
#define IDM_SAVEACOPY 116
#define IDM_SAVEASXML 117
#define IDM_MRU_SEP 120
#define IDM_PRINTSETUP 130
#define IDM_PRINT 131
#define IDM_LOADSESSION 132
#define IDM_SAVESESSION 133
#define IDM_QUIT 140
#define IDM_ENCODING_DEFAULT 150
#define IDM_ENCODING_UCS2BE 151
#define IDM_ENCODING_UCS2LE 152
#define IDM_ENCODING_UTF8 153
#define IDM_ENCODING_UCOOKIE 154
#define MRU_START 16
#define IMPORT_START 19
#define TOOLS_START 3
// Edit
#define IDM_UNDO 201
#define IDM_REDO 202
#define IDM_CUT 203
#define IDM_COPY 204
#define IDM_PASTE 205
#define IDM_CLEAR 206
#define IDM_SELECTALL 207
#define IDM_PASTEANDDOWN 208
#define IDM_FIND 210
#define IDM_FINDNEXT 211
#define IDM_FINDNEXTBACK 212
#define IDM_FINDNEXTSEL 213
#define IDM_FINDNEXTBACKSEL 214
#define IDM_FINDINFILES 215
#define IDM_REPLACE 216
#define IDM_GOTO 220
#define IDM_BOOKMARK_NEXT 221
#define IDM_BOOKMARK_TOGGLE 222
#define IDM_BOOKMARK_PREV 223
#define IDM_BOOKMARK_CLEARALL 224
#define IDM_BOOKMARK_NEXT_SELECT 225
#define IDM_BOOKMARK_PREV_SELECT 226
#define IDM_MATCHBRACE 230
#define IDM_SELECTTOBRACE 231
#define IDM_SHOWCALLTIP 232
#define IDM_COMPLETE 233
#define IDM_COMPLETEWORD 234
#define IDM_EXPAND 235
#define IDM_TOGGLE_FOLDALL 236
#define IDM_TOGGLE_FOLDRECURSIVE 237
#define IDM_EXPAND_ENSURECHILDRENVISIBLE 238
#define IDM_UPRCASE 240
#define IDM_LWRCASE 241
#define IDM_ABBREV 242
#define IDM_BLOCK_COMMENT 243
#define IDM_STREAM_COMMENT 244
#define IDM_COPYASRTF 245
#define IDM_BOX_COMMENT 246
#define IDM_INS_ABBREV 247
#define IDM_JOIN 248
#define IDM_SPLIT 249
#define IDM_DUPLICATE 250
#define IDM_INCSEARCH 252
#define IDM_ENTERSELECTION 256
#define IDC_INCFINDTEXT 253
#define IDC_INCFINDBTNOK 254
#define IDD_FIND2 255
#define IDC_EDIT1 1000
#define IDC_STATIC -1
#define IDM_PREVMATCHPPC 260
#define IDM_SELECTTOPREVMATCHPPC 261
#define IDM_NEXTMATCHPPC 262
#define IDM_SELECTTONEXTMATCHPPC 263
// Tools
#define IDM_COMPILE 301
#define IDM_BUILD 302
#define IDM_GO 303
#define IDM_STOPEXECUTE 304
#define IDM_FINISHEDEXECUTE 305
#define IDM_NEXTMSG 306
#define IDM_PREVMSG 307
#define IDM_MACRO_SEP 310
#define IDM_MACRORECORD 311
#define IDM_MACROSTOPRECORD 312
#define IDM_MACROPLAY 313
#define IDM_MACROLIST 314
#define IDM_ACTIVATE 320
#define IDM_SRCWIN 350
#define IDM_RUNWIN 351
#define IDM_TOOLWIN 352
#define IDM_STATUSWIN 353
#define IDM_TABWIN 354
// Options
#define IDM_SPLITVERTICAL 401
#define IDM_VIEWSPACE 402
#define IDM_VIEWEOL 403
#define IDM_VIEWGUIDES 404
#define IDM_SELMARGIN 405
#define IDM_FOLDMARGIN 406
#define IDM_LINENUMBERMARGIN 407
#define IDM_VIEWTOOLBAR 408
#define IDM_TOGGLEOUTPUT 409
#define IDM_VIEWTABBAR 410
#define IDM_VIEWSTATUSBAR 411
#define IDM_TOGGLEPARAMETERS 412
#define IDM_OPENFILESHERE 413
#define IDM_WRAP 414
#define IDM_WRAPOUTPUT 415
#define IDM_READONLY 416
#define IDM_CLEAROUTPUT 420
#define IDM_SWITCHPANE 421
#define IDM_EOL_CRLF 430
#define IDM_EOL_CR 431
#define IDM_EOL_LF 432
#define IDM_EOL_CONVERT 433
#define IDM_TABSIZE 440
#define IDM_MONOFONT 450
#define IDM_OPENLOCALPROPERTIES 460
#define IDM_OPENUSERPROPERTIES 461
#define IDM_OPENGLOBALPROPERTIES 462
#define IDM_OPENABBREVPROPERTIES 463
#define IDM_OPENLUAEXTERNALFILE 464
//#define IDM_SELECTIONMARGIN 490
//#define IDM_BUFFEREDDRAW 491
//#define IDM_USEPALETTE 492
// Buffers
#define IDM_PREVFILE 501
#define IDM_NEXTFILE 502
#define IDM_CLOSEALL 503
#define IDM_SAVEALL 504
#define IDM_BUFFERSEP 505
#define IDM_PREVFILESTACK 506
#define IDM_NEXTFILESTACK 507
// Help
#define IDM_HELP 901
#define IDM_ABOUT 902
#define IDM_HELP_SCITE 903
// Windows specific windowing options
#define IDM_ONTOP 960
#define IDM_FULLSCREEN 961
// Dialog control IDs
#define IDGOLINE 220
#define IDABOUTSCINTILLA 221
#define IDFINDWHAT 222
#define IDFILES 223
#define IDDIRECTORY 224
#define IDCURRLINE 225
#define IDLASTLINE 226
#define IDEXTEND 227
#define IDTABSIZE 228
#define IDINDENTSIZE 229
#define IDUSETABS 230
#define IDREPLACEWITH 231
#define IDWHOLEWORD 232
#define IDMATCHCASE 233
#define IDDIRECTIONUP 234
#define IDDIRECTIONDOWN 235
#define IDREPLACE 236
#define IDREPLACEALL 237
#define IDREPLACEINSEL 238
#define IDREGEXP 239
#define IDWRAP 240
#define IDUNSLASH 241
#define IDCMD 242
// id for the browse button in the grep dialog
#define IDBROWSE 243
#define IDABBREV 244
#define IDREPLACEINBUF 244
#define IDMARKALL 245
#define IDGOLINECHAR 246
#define IDCURRLINECHAR 247
#define IDREPLDONE 248
#define IDDOTDOT 249
#define IDFINDINSTYLE 250
#define IDFINDSTYLE 251
#define IDCONVERT 252
#define IDPARAMSTART 300
// Dialog IDs
#define IDD_FIND 400
#define IDD_REPLACE 401
#define IDD_BUFFERS 402
#define IDD_FIND_ADV 403
#define IDD_REPLACE_ADV 404
// Resource IDs
#define IDR_CLOSEFILE 100
#endif
|
// compute the evals and S/N ratios. Will ignore the top 'shave' most points until r2 meets the min requirement
public Histogram fitEval(int samples, int shave, double min_r2)
{
if (mData==null) return this;
Collections.sort(mData, Ordering.natural().reverse());
List<Double> data = new ArrayList<>(getData());
if (data==null || data.size()<3) return this;
List<WeightedObservedPoint> Rs = new ArrayList<>();
double decoys = data.size(), survived=0, end=(int )Math.min(samples, decoys);
for (Double d : data)
{
Rs.add(new WeightedObservedPoint(d, d, Math.log10(++survived/decoys)));
if (Rs.size()>end) break;
}
try
{
mSurvivalFitted = new Fitted().fit(1, Rs);
if (mSurvivalFitted.getR2()<min_r2 && shave>0)
for (int i=0; i<shave; i++)
{
Rs.remove(0);
Fitted fit = new Fitted().fit(1, Rs);
if (fit!=null && fit.getR2()<mSurvivalFitted.getR2()) break;
if (fit!=null && fit.getR2()>mSurvivalFitted.getR2()) mSurvivalFitted=fit;
if (mSurvivalFitted.getR2()>=min_r2) break;
}
data=(List )Tools.dispose(data);
Rs =(List )Tools.dispose(Rs);
}
catch (Exception e)
{
e.printStackTrace();
}
return this;
} |
/* detect_res
detects the combined resolution of all monitors attached to the computer
*/
void detect_res(void) {
uint32_t count = 0, chars = 0;
CGGetOnlineDisplayList(UINT32_MAX, NULL, &count);
CGDirectDisplayID displays[count];
CGGetOnlineDisplayList(count, displays, &count);
chars += snprintf(res_str, MAX_STRLEN, "%zu x %zu", CGDisplayPixelsWide(*displays),
CGDisplayPixelsHigh(*displays));
	/* Append any additional displays, writing only into the space left in res_str. */
	for (uint32_t i = 1; i < count; ++i) {
		chars += snprintf(res_str + chars, MAX_STRLEN - chars, ", %zu x %zu", CGDisplayPixelsWide(displays[i]),
							CGDisplayPixelsHigh(displays[i]));
	}
return;
} |
/**
* An Event is emitted by the Platform Service to all attached listeners.
* @author Desonte 'drjoliv' Jolivet : [email protected]
*/
public abstract class Event {
private final Object source;
public Event(Object source) {
this.source = source;
}
/**
* Returns the source of this event.
* @return the source
*/
public Object getSource() {
return source;
}
/**
* A generic event only containing the source object.
*/
	public static final class GenericEvent extends Event {
public GenericEvent(Object source) {
super(source);
}
}
/**
* An event emitted whenever a node is added to the jBullet database.
*/
	public static final class CreateEvent extends Event {
private final Node node;
public CreateEvent ( Object source, Node node ) {
super(source);
this.node = node;
}
/**
* Returns a node that has been recently inserted into jBullet database.
* @return the node
*/
public Node getNode() {
return node;
}
}
/**
* An event emitted whenever a node is updated within the jBullet database.
*/
	public static final class UpdateEvent extends Event {
private final Node node;
private UpdateEvent (Object source, Node node) {
super(source);
this.node = node;
}
/**
* Returns a node that has been recently updated within jBullet database.
* @return the node
*/
public Node getNode() {
return node;
}
}
/**
* An event emitted whenever a node is removed from the jBullet database.
*/
	public static final class DeleteEvent extends Event {
private final Node node;
private DeleteEvent (Object source, Node node) {
super(source);
this.node = node;
}
/**
* Returns a node that has recently been removed from the jBullet database.
* @return the node
*/
public Node getNode() {
return node;
}
}
} |
/**
* Created by zcrpro on 2018/1/4.
*/
public class HelpOldActivity extends Activity {
@InjectView(R.id.account)
TextView account;
@InjectView(R.id.notice_btn)
Button noticeBtn;
@InjectView(R.id.main_view)
RelativeLayout mainView;
@InjectView(R.id.title_icon)
ImageView titleIcon;
@InjectView(R.id.telTxt)
TextView telTxt;
@Override
protected void onCreate(@Nullable Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.axxxy_old_help);
ButterKnife.inject(this);
if (App.loanAccountInfoRep!=null){
for (int i = 0; i <App.loanAccountInfoRep.loan_account_info.valid_loan_way_account_info.size() ; i++) {
if (App.loanAccountInfoRep.loan_account_info.valid_loan_way_account_info.get(i).loan_way.type.equals("GuiYangCreditLoanPay")){
account.setText(optimizeAccount(App.loanAccountInfoRep.loan_account_info.valid_loan_way_account_info.get(i).card_no));
}
}
}
findViewById(R.id.mainview).setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
finish();
}
});
// String a = "<font color='#c4c4c4'>默认还账单,若要提前还贷款本金,需拨打</font><font color='#208af0'>400-288-4028</font><font color='#c4c4c4'>申请</font>";
// telTxt.setText(Html.fromHtml(a));
noticeBtn.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view) {
finish();
}
});
}
private String optimizeAccount(String account){
String optimAccount = "";
for(int i = 0;i < account.length();i++){
if(i % 4 == 0 && i != 0){
optimAccount += " ";
optimAccount += account.charAt(i)+"";
} else{
optimAccount += account.charAt(i)+"";
}
}
return optimAccount;
}
} |
The aftermath of the Paris terror attacks included calls for calm and solidarity—and some dangerous rumour-mongering and distortions.
Amid the swirl of misinformation, a Sikh man from Canada found himself accused by the internet of being one of the terrorists in the deadly Paris attacks.
It began after a bathroom selfie of Veerender Jubbal was photoshopped and then put into circulation online on Saturday (Nov. 14). The original image was simple: Jubbal was standing in a bathroom, holding an iPad and taking a self-portrait off the mirror.
The photograph was doctored to add a vest on Jubbal’s torso, with some visible wires. His iPad was turned into a Quran.
Apart from the fact that Jubbal’s turban clearly identifies him as Sikh, there were a number of other inconsistencies in the photoshopped hoax—including North American power outlets and a sex toy, as identified by BuzzFeed. Also, as the Sydney Morning Herald succinctly explained, “Korans generally cannot take selfies.”
Nonetheless, the hoax was convincing enough for La Razon—a major Spanish daily newspaper—which carried Jubbal’s picture on its frontpage (top left corner) on Sunday.
A number of other European outlets also carried the photoshopped image.
Although the perpetrators of the hoax haven’t been identified, Jubbal holds GamerGate—an online movement within the video game community—responsible for the incident. And this isn’t the first time GamerGate has attacked Jubbal: About 10 months ago, members of the group apparently flooded Jubbal’s Twitter timeline with images from 9/11.
After 9/11, many Sikhs in America were mistaken for Muslims. There were multiple attacks on Sikh men following the terror attack, including the murder of Arizona gas station owner Balbir Singh Sodhi and the assault on a Sikh temple in Oak Creek. |
/*
* Copyright (c) 2003-2022 Broad Institute, Inc., Massachusetts Institute of Technology, and Regents of the University of California. All rights reserved.
*/
package org.genepattern.modules;
import java.io.File;
import java.io.IOException;
import java.util.Properties;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.PosixParser;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import edu.mit.broad.genome.Conf;
import xtools.api.AbstractTool;
import xtools.chip2chip.Chip2Chip;
/**
* Chip2ChipWrapper parses the command line arguments passed in by GP Server's run task page, creates a new parameter file, and passes that
* parameter file to the GSEA tool's main method. Upon completion of Chip2Chip, Chip2ChipWrapper creates a zip file containing results and
* then cleans up the working directory so it only contains the zip file, the output files, and the input files that were uploaded by the
* run task page.
*/
public class Chip2ChipWrapper extends AbstractModule {
private static final Logger klog = LoggerFactory.getLogger(Chip2ChipWrapper.class);
// Suppressing the static-access warnings because this is the recommended usage according to the Commons-CLI docs.
@SuppressWarnings("static-access")
private static Options setupCliOptions() {
Options options = new Options();
options.addOption(OptionBuilder.withArgName("chipPlatform").hasArg().create("chip"));
options.addOption(OptionBuilder.withArgName("geneSetsDatabaseList").hasOptionalArg().create("gmx_list"));
options.addOption(OptionBuilder.withArgName("geneSetsDatabase").hasArg().create("gmx"));
options.addOption(OptionBuilder.withArgName("geneSetMatrixFormat").hasArg().create("genesetmatrix_format"));
options.addOption(OptionBuilder.withArgName("showEtiology").hasArg().create("show_etiology"));
options.addOption(OptionBuilder.withArgName("selectedGeneSets").hasArg().create("selected_gene_sets"));
options.addOption(OptionBuilder.withArgName("altDelim").hasArg().create("altDelim"));
options.addOption(OptionBuilder.withArgName("createZip").hasArg().create("zip_report"));
options.addOption(OptionBuilder.withArgName("outFile").hasArg().create("out"));
options.addOption(OptionBuilder.withArgName("reportLabel").hasArg().create("rpt_label"));
options.addOption(OptionBuilder.withArgName("parameterFile").hasArg().create("param_file"));
options.addOption(OptionBuilder.withArgName("devMode").hasArg().create("dev_mode"));
options.addOption(OptionBuilder.withArgName("gpModuleMode").hasArg().create("run_as_genepattern"));
return options;
}
public static void main(final String[] args) throws Exception {
// Success flag. We set this to *false* until proven otherwise by a successful Tool run. This saves having to catch
// all manner of exceptions along the way; just allow them to propagate to the top-level handler.
boolean success = false;
AbstractTool tool = null;
File analysis = null;
File tmp_working = null;
File cwd = null;
try {
Options opts = setupCliOptions();
CommandLineParser parser = new PosixParser();
CommandLine cl = parser.parse(opts, args);
// We want to check *all* params before reporting any errors so that the user sees everything that went wrong.
boolean paramProcessingError = false;
// Properties object to gather parameter settings to be passed to the Tool
Properties paramProps = new Properties();
// The GP modules should declare they are running in GP mode. This has minor effects on the error messages
// and runtime behavior.
boolean gpMode = StringUtils.equalsIgnoreCase(cl.getOptionValue("run_as_genepattern"), "true");
String paramFileOption = cl.getOptionValue("param_file");
boolean hasParamFile = StringUtils.isNotBlank(paramFileOption);
if (gpMode) {
// Turn off debugging in the GSEA code and tell it not to create directories
// TODO: confirm the "mkdir" property works as expected
System.setProperty("debug", "false");
System.setProperty("mkdir", "false");
// Set the GSEA update check String to show this is coming from the modules.
System.setProperty("UPDATE_CHECK_EXTRA_PROJECT_INFO", "GP_MODULES");
String outOption = cl.getOptionValue("out");
if (StringUtils.isNotBlank(outOption)) {
klog.warn("-out parameter ignored; only valid wih -run_as_genepattern false.");
}
if (hasParamFile) {
klog.warn("-param_file parameter ignored; only valid wih -run_as_genepattern false.");
hasParamFile = false;
}
// Define a working directory, to be cleaned up on exit. The name starts with a '.' so it's hidden from GP & file system.
// Also, define a dedicated directory for building the report output
cwd = new File(System.getProperty("user.dir"));
tmp_working = new File(".tmp_gsea");
analysis = new File(tmp_working, "analysis");
analysis.mkdirs();
} else {
// Set the GSEA update check String to show this is CLI usage.
System.setProperty("UPDATE_CHECK_EXTRA_PROJECT_INFO", "GSEA_CLI");
}
// Enable any developer-only settings. For now, this just disables the update check; may do more in the future
boolean devMode = StringUtils.equalsIgnoreCase(cl.getOptionValue("dev_mode"), "true");
if (devMode) {
System.setProperty("MAKE_GSEA_UPDATE_CHECK", "false");
}
boolean createZip = StringUtils.equalsIgnoreCase(cl.getOptionValue("zip_report"), "true");
// Convert the format string passed by GP into the tokens expected by GSEA.
String outputFileFormat = cl.getOptionValue("genesetmatrix_format");
outputFileFormat = (StringUtils.equalsIgnoreCase(outputFileFormat, "gmx")) ? "GeneSetMatrix[gmx]"
: "GeneSetMatrix_Transposed[gmt]";
String chipPlatformFileName = cl.getOptionValue("chip");
if (StringUtils.isNotBlank(chipPlatformFileName)) {
if (gpMode) {
chipPlatformFileName = copyFileWithoutBadChars(chipPlatformFileName, tmp_working);
paramProcessingError |= (chipPlatformFileName == null);
}
} else if (!hasParamFile) {
// Note that we don't check this here if a param_file is specified; we will let the tool
// check it as it may exist in the file (in fact that's likely). This same pattern will
// follow for other parameters below.
String paramName = (gpMode) ? "chip.platform.file" : "-chip";
klog.error("Required parameter '{}' not found", paramName);
paramProcessingError = true;
}
String rptLabel = cl.getOptionValue("rpt_label");
if (StringUtils.isBlank(rptLabel)) {
rptLabel = "my_analysis";
}
String geneSetDBParam = cl.getOptionValue("gmx");
String geneSetDBListParam = cl.getOptionValue("gmx_list");
String selectedGeneSetsParam = cl.getOptionValue("selected_gene_sets");
String altDelim = cl.getOptionValue("altDelim", "");
if (StringUtils.isNotBlank(altDelim) && altDelim.length() > 1 && !hasParamFile) {
String paramName = (gpMode) ? "alt.delim" : "--altDelim";
klog.error("Invalid {} '{}' specified. This must be only a single character and no whitespace.", paramName, altDelim);
paramProcessingError = true;
}
String geneSetsSelector = determineSelectorFromParams(geneSetDBParam, geneSetDBListParam, selectedGeneSetsParam, altDelim,
gpMode, tmp_working, klog, hasParamFile);
paramProcessingError |= geneSetsSelector == null;
if (paramProcessingError) {
// Should probably use BadParamException and set an errorCode, use it to look up a Wiki Help page.
throw new Exception("There were one or more errors with the job parameters. Please check log output for details.");
}
klog.info("Parameters passing to Chip2Chip tool:");
setParam("gmx", geneSetsSelector, paramProps, klog);
setParam("chip_target", chipPlatformFileName, paramProps, klog);
setParam("genesetmatrix_format", outputFileFormat, paramProps, klog);
setParam("rpt_label", rptLabel, paramProps, klog);
setParam("zip_report", Boolean.toString(createZip), paramProps, klog);
setParam("gui", "false", paramProps, klog);
if (gpMode) {
setParam("out", analysis.getPath(), paramProps, klog);
} else {
// For regular CLI mode just pass through -out instead of setting tmpdir
setOptionValueAsParam("out", cl, paramProps, klog);
}
if (StringUtils.isNotBlank(altDelim)) {
setParam("altDelim", altDelim, paramProps, klog);
}
// Finally, load up the remaining simple parameters. We'll let Chip2Chip validate these.
setOptionValueAsParam("show_etiology", cl, paramProps, klog);
if (!hasParamFile) paramFileOption = "";
tool = new Chip2Chip(paramProps, paramFileOption);
try {
success = AbstractTool.module_main(tool);
} finally {
try {
                    // Nothing to copy in CLI mode (the analysis directory is only created for GenePattern runs)
                    if (analysis == null || !analysis.exists()) return;
copyAnalysisToCurrentDir(cwd, analysis, createZip, "chip2chip_results.zip");
} catch (IOException ioe) {
System.err.println("Error during clean-up:");
throw ioe;
}
}
} catch (Throwable t) {
success = false;
klog.error("Error while processing:");
klog.error(t.getMessage());
t.printStackTrace(System.err);
} finally {
try {
if (cwd != null && tmp_working != null) {
cleanUpAnalysisDirs(cwd, tmp_working);
}
} finally {
Conf.exitSystem(!success);
}
}
}
} |
package uk.co.mruoc.exercises.batch;
import lombok.Data;
import lombok.RequiredArgsConstructor;
import java.util.Collection;
import java.util.Collections;
@RequiredArgsConstructor
@Data
public class MessageBatch {
private final Collection<Message> messages;
public Collection<Message> getMessages() {
return Collections.unmodifiableCollection(messages);
}
}
|
/**
* Gets a resource as a URL. Resources are all assumed to be in the
* central "Resources" directory.
*
* @param filename The filename of the resource (not the path)
* For example: "foo.gif"
*/
public static URL toURL(final String filename) {
return JarResources.class.getResource(PKG_RESOURCE + filename);
} |
def linear_sum_assignment(costs, solver=None):
    """Solve the linear assignment problem for the given cost matrix.

    Returns a pair of integer index arrays (row_ids, col_ids) describing the
    optimal matching; empty arrays are returned when the cost matrix is empty.
    """
    costs = np.asarray(costs)
    if not costs.size:
        return np.array([], dtype=int), np.array([], dtype=int)

    solver = solver or default_solver
    if isinstance(solver, str):
        solver = solver_map.get(solver, None)
    assert callable(solver), 'Invalid LAP solver.'

    rids, cids = solver(costs)
    rids = np.asarray(rids).astype(int)
    cids = np.asarray(cids).astype(int)
    return rids, cids
Income Inequality and Current Account Imbalances
This paper studies the empirical and theoretical link between increases in income inequality and increases in current account deficits. Cross-sectional econometric evidence shows that higher top income shares, and also financial liberalization, which is a common policy response to increases in income inequality, are associated with substantially larger external deficits. To study this mechanism we develop a DSGE model that features workers whose income share declines at the expense of investors. Loans to workers from domestic and foreign investors support aggregate demand and result in current account deficits. Financial liberalization helps workers smooth consumption, but at the cost of higher household debt and larger current account deficits. In emerging markets, workers cannot borrow from investors, who instead deploy their surplus funds abroad, leading to current account surpluses instead of deficits. |
Pediatric 320-row cardiac computed tomography using electrocardiogram-gated model-based full iterative reconstruction
Background: A full iterative reconstruction algorithm is available, but its diagnostic quality in pediatric cardiac CT is unknown.
Objective: To compare the imaging quality of two algorithms, full and hybrid iterative reconstruction, in pediatric cardiac CT.
Materials and methods: We included 49 children with congenital cardiac anomalies who underwent cardiac CT. We compared the quality of images reconstructed using the two algorithms (full and hybrid iterative reconstruction) based on a 3-point scale for the delineation of the following anatomical structures: atrial septum, ventricular septum, right atrium, right ventricle, left atrium, left ventricle, main pulmonary artery, ascending aorta, aortic arch including the patent ductus arteriosus, descending aorta, right coronary artery and left main trunk. We evaluated beam-hardening artifacts from contrast-enhancement material using a 3-point scale, and we evaluated the overall image quality using a 5-point scale. We also compared image noise, signal-to-noise ratio and contrast-to-noise ratio between the algorithms.
Results: The overall image quality was significantly higher with full iterative reconstruction than with hybrid iterative reconstruction (3.67±0.79 vs. 3.31±0.89, P=0.0072). The evaluation scores for most of the gross structures were higher with full iterative reconstruction than with hybrid iterative reconstruction. There was no significant difference between full and hybrid iterative reconstruction in the presence of beam-hardening artifacts. Image noise was significantly lower, and signal-to-noise ratio and contrast-to-noise ratio were significantly higher, with full iterative reconstruction.
Conclusion: Diagnostic image quality was superior for pediatric cardiac CT images reconstructed with electrocardiogram-gated full iterative reconstruction.
Introduction
Cardiac CT is increasingly used for the diagnosis of infantile and pediatric heart disease. Three-dimensional (3-D) isovolumetric datasets obtained from cardiac CT robustly complement the morphological assessment provided by ultrasonography. However, exposure to ionizing radiation remains a concern, particularly in infantile and pediatric populations.
Several technologies have been introduced to reduce the radiation dose in diagnostic imaging, such as prospective electrocardiogram-triggering and iterative reconstruction methods, which enable good image quality at lower radiation exposure in pediatric CT. Major CT vendors have developed hybrid iterative reconstruction techniques that combine iterative reconstruction with filtered back-projection. Unlike hybrid iterative reconstruction, full iterative reconstruction techniques are based on both forward and backward projection. In the backward projection steps, images are created from the projection data; conversely, in the forward projection steps, projection data are created from the image data. The forward and backward projections are repeated until the images no longer change between iterations or until the maximum number of iterations is reached. Full iterative reconstruction requires high computational power for image reconstruction, which is one reason its development has only recently become practical.
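As a rough conceptual illustration of the forward/backward projection loop described above (not the vendor's proprietary algorithm), a minimal SIRT-style iteration can be sketched as follows; the system matrix A, the measured sinogram y and the iteration settings are illustrative assumptions.

# Conceptual sketch only: a SIRT-like forward/backward projection loop.
# A (system matrix), y (measured sinogram), n_iters and relax are illustrative
# assumptions, not parameters of any commercial implementation.
import numpy as np

def sirt_reconstruct(A, y, n_iters=50, relax=0.9):
    # Row/column sums normalise the update and guard against divide-by-zero.
    row_sum = np.maximum(A.sum(axis=1), 1e-12)
    col_sum = np.maximum(A.sum(axis=0), 1e-12)
    x = np.zeros(A.shape[1])
    for _ in range(n_iters):
        residual = (y - A @ x) / row_sum              # compare forward projection with measured data
        x = x + relax * (A.T @ residual) / col_sum    # backward-project the residual onto the image
    return x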
Three major CT vendors have recently introduced the following advanced model-based full iterative reconstruction algorithms: Veo (GE Healthcare, Waukesha, WI), Iterative Model Reconstruction (IMR; Philips Healthcare, Best, The Netherlands), and forward projected model-based iterative reconstruction solution (FIRST; Toshiba Medical Systems, Tokyo, Japan). Among these three full iterative reconstruction algorithms, FIRST is the only one that can be combined with an electrocardiogram-gated scan.
A previous study employing conventional radiation doses revealed that images obtained by the electrocardiogram-gated full iterative reconstruction method had less perceived image noise and better tissue contrast at similar resolution compared with an existing hybrid iterative reconstruction algorithm, namely Adaptive Iterative Dose Reduction 3D (AIDR 3D; Toshiba Medical Systems), which has been designed to be fully integrated into the automatic exposure control to reduce the radiation dose in pediatric cardiac CT. However, no study has evaluated the feasibility of this method in terms of the imaging quality for diagnosis of congenital heart disease and the ability to delineate cardiac anatomical structures in children.
Therefore the aim of this study was to compare the diagnostic quality of two reconstruction algorithms, full iterative reconstruction and hybrid iterative reconstruction, in low-dose 320-row pediatric cardiac CT, particularly in terms of delineation of cardiac anatomical structures of children.
Patients
We obtained parental written informed consent for contrast-enhanced CT in all children. The local ethics committee approved the study protocol and waived the requirement for informed consent to retrospectively review the CT examinations.
We retrospectively examined CT images of children with congenital heart disease requiring surgical or catheter intervention and without renal dysfunction (effective glomerular filtration rate<40 mL/min) who underwent cardiac CT from September 2015 to March 2016. We included CT images of 49 children (ages 5 days to 5 years 10 months, median 122 days; 25 males and 24 females; body weight 2.5-28.0 kg, median 5.0 kg).
In total, 43/49 children had complex (≥2) congenital heart diseases, while 6 had a single disease. Types of congenital heart disease are shown in Table 1. Twenty-four examinations were performed before surgical or catheter intervention, while 25 examinations were performed after ≥1 surgical or catheter intervention. Procedure types performed prior to the examinations are shown in Table 2. One child had an implanted pacemaker before the CT scan; however, because the generator was implanted in the abdominal wall, the metal artifact from the generator and the pacemaker lead was negligible in our image analysis.
CT data acquisition and image reconstruction
All children underwent angiography via second-generation 320-row CT (Aquilion ONE ViSION edition; Toshiba Medical Systems, Tochigi, Japan) with electrocardiogram-gated axial scans. Scan parameters were as follows: tube potential, 80 kVp; gantry rotation time, 275 ms; and tube current determined by automatic exposure control (a predetermined level of image noise set at a standard deviation of 40). Children in the study received contrast-enhancement material at 2 mL/kg body weight of 300 mgI/mL iohexol (Omnipaque 300, 300 mg/mL; Daiichi Sankyo, Tokyo, Japan).
For children <6 months or with a body weight <5 kg, the contrast-enhancement material was diluted by adding normal saline at one-half volume of the material and injected at a rate of 0.5 mL/s. For patients >6 months or with a body weight of >5 kg, undiluted contrast-enhancement material was injected at a rate of 1.0 mL/s. For each child, an experienced cardiovascular radiologist and senior technologist determined the phase with minimum artifacts at the CT console. Multiple phases were reconstructed if image artifacts persisted. The slice thickness of reconstructed images was 0.50 mm with increments of 0.25 mm. Images were reconstructed using two algorithms: a medium soft-tissue kernel (FC04) with a hybrid iterative reconstruction algorithm (AIDR 3D enhanced strong mode with SURE Exposure) and full iterative reconstruction (FIRST in the "cardiac strong" mode).
Radiation dose
Radiation dose exposure was assessed as the volume CT dose index (CTDIvol, mGy) and the dose-length product (mGy·cm). Using the dose-length product displayed by the CT system after the examination (32-cm phantom), the effective dose E for each child was calculated as E = k × dose-length product, where k is the conversion coefficient for chest CT at 80 kV, with values of 0.0823, 0.0525, 0.0344 and 0.248 for newborns and for children aged <1 year, 1-4 years and 5-10 years, respectively, based on a previous report. Size-specific dose estimates (SSDEs) were also calculated. Anterior-posterior and lateral diameters were measured on transverse CT images at the level of the aortic valve. For the sum of these diameters in each child, a conversion factor was chosen from the table in the report of AAPM task group 204 for a phantom size of 32 cm. SSDEs were calculated as CTDIvol × conversion factor (mGy).
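To make the arithmetic explicit, the following sketch computes the effective dose and SSDE from the quantities above; the numeric inputs and the conversion-factor table are placeholders, while the age-specific k coefficients are the values quoted in this section.

# Illustrative dose calculations; inputs and the TG-204 lookup table are placeholders.
K_CHEST_80KV = {"newborn": 0.0823, "<1y": 0.0525, "1-4y": 0.0344, "5-10y": 0.248}

def effective_dose(dlp_mgy_cm, age_group):
    # E = k * dose-length product
    return K_CHEST_80KV[age_group] * dlp_mgy_cm

def ssde(ctdi_vol_mgy, ap_cm, lat_cm, conversion_table):
    # Conversion factor looked up from the AAPM TG-204 table (32-cm phantom)
    # using the sum of the anterior-posterior and lateral diameters.
    factor = conversion_table[round(ap_cm + lat_cm)]
    return ctdi_vol_mgy * factor

# Example with hypothetical values: dose-length product of 5 mGy*cm in an infant under 1 year.
print(effective_dose(5.0, "<1y"))  # -> approximately 0.26 mSv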
Subjective image analysis
Subjective image quality was rated by two cardiovascular radiologists (G.S. and E.M., with 7 years and 15 years of experience in pediatric and cardiovascular radiology, respectively), who were blinded to the details of the CT datasets, which were provided in a randomized order.
A 3-point scale (3=diagnostic/2=diagnostic with limitations/ 1=non-diagnostic) was used to score the delineation of the following anatomical structures: atrial septum, ventricular septum, right atrium, right ventricle, left atrium (including the pulmonary veins), left ventricle, main pulmonary artery, aortic arch (including the patent ductus arteriosus), descending aorta and coronary arteries (right coronary artery and left main trunk).
Artifacts from medical devices (e.g., electrodes and cables for electrocardiography) were not evaluated. Before the scan, metallic items were positioned as far as possible from the scan range.
A 3-point scale was used to evaluate the presence of beam-hardening artifact from contrast-enhancement material: 3=no artifact, 2=mild artifact that still allowed evaluation of the surrounding anatomical structures, and 1=severe artifact that precluded evaluation. Representative images for the three levels of beam-hardening artifact are shown in Fig. 1.
Fig. 1 Representative axial contrast-enhanced CT images for the three levels of beam-hardening artifacts. a Score 3, no beam-hardening artifact, in a 1-month-old boy with single atrium, single ventricle, tricuspid atresia and double-outlet right ventricle. b Score 2, mild artifact, defined as images that still allow evaluation of the surrounding anatomical structures, in a 7-day-old boy with single atrium, single ventricle, pulmonary artery stenosis and total anomalous pulmonary venous return. c Score 1, severe artifact that precludes evaluation, in a 10-day-old girl with atrial septal defects and ventricular septal defect.
The overall image quality was evaluated using the following 5-point scale: 5=excellent anatomical clarity and image quality; 4=good anatomical clarity and image quality with minor motion artifact; 3=fair image quality with motion artifact extending <5 mm from the vessel center; 2=poor image quality (inadequate delineation between the vessel and surrounding tissue, presence of streak artifacts extending at least 5 mm from the center of the vessel, and no useful information obtained); and 1=non-diagnostic image quality. Representative images of the five levels of overall image quality are shown in Fig. 2.
Subjective image quality was defined as diagnostic when the scores were equal to or greater than 2 on the 3-point scale and equal to or greater than 3 on the 5-point scale. The assessment scale for image quality was based on a previous pediatric study using cardiovascular CT angiography.
Objective image analysis
For objective image analysis, we calculated image noise, signal-to-noise ratio (SNR) and contrast-to-noise ratio (CNR) according to the method proposed by Pflederer et al. Regions of interest were defined on an axial image at the level of the proximal ascending aorta. The average CT number and noise were recorded using a circular region of interest, which was made as large as possible while carefully avoiding inclusion of the vessel wall to prevent partial volume effects. A second region of interest was placed immediately next to the vessel contour on an axial image and the average CT number was recorded. Image noise was defined as the average standard deviation within the circular region of interest placed in the ascending aorta. Signal-to-noise ratio was defined as the CT number of the ascending aorta divided by the image noise. Contrast-to-noise ratio was calculated as the difference in CT number between the ascending aortic lumen and nearby connective tissue divided by the image noise. Image noise, SNR and CNR were calculated for both full iterative reconstruction and hybrid iterative reconstruction by a cardiovascular radiologist (G.S., with 7 years of experience in pediatric and cardiovascular radiology) who was blinded to the details of the CT datasets. These indicators were compared between the two algorithms.
Fig. 2 Representative axial contrast-enhanced CT images of the five levels of overall image quality. a Score 5, excellent anatomical clarity and image quality, in a 2-month-old girl with atrial septal defects and ventricular septal defect. b Score 4, good anatomical clarity and image quality with minor motion artifacts, in a 6-month-old boy with hypoplastic left heart syndrome after Norwood procedure and pulmonary angioplasty. c Score 3, fair image quality with motion artifacts extending less than 5 mm from the vessel center (arrow), in a 5-day-old boy with transposition of the great arteries, atrial septal defect and patent ductus arteriosus. d Score 2, poor image quality: inadequate delineation between the vessel and surrounding tissue (arrow), streak artifacts extending at least 5 mm from the center of the vessel (arrowhead), and no useful information obtained, in a 2-year-old girl with atrial septal defect. e Score 1, non-diagnostic image quality, in a 1-month-old girl with transposition of the great arteries.
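A minimal sketch of these ROI-based calculations is given below; the variable names are illustrative, and the Hounsfield-unit arrays are assumed to come from the two regions of interest described above.

# Illustrative ROI-based noise/SNR/CNR calculation; hu_aorta and hu_tissue are
# assumed NumPy arrays of Hounsfield units sampled from the two regions of interest.
import numpy as np

def roi_metrics(hu_aorta, hu_tissue):
    noise = hu_aorta.std(ddof=0)                          # image noise: SD within the aortic ROI
    snr = hu_aorta.mean() / noise                         # signal-to-noise ratio
    cnr = (hu_aorta.mean() - hu_tissue.mean()) / noise    # contrast-to-noise ratio
    return noise, snr, cnr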
Statistical analysis
We compared pairs of subjective evaluation scores and objective image-quality indicators for full iterative reconstruction and hybrid iterative reconstruction for each patient. Scores are presented as means ± standard deviations and were compared using the Wilcoxon signed-rank test. We calculated interobserver agreement for subjective image quality using Cohen's kappa statistic and interpreted it as poor (k<0.20), fair (k=0.21-0.40), moderate (k=0.41-0.60), good (k=0.61-0.80), very good (k=0.81-0.90) or excellent (k≥0.91). We conducted all statistical analyses using JMP software (version 12.0.0; SAS Institute, Cary, NC). P<0.05 was considered statistically significant.
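For readers who want to reproduce this style of analysis outside JMP, an equivalent paired comparison and agreement calculation can be sketched as follows; SciPy and scikit-learn availability and the score arrays are assumptions, not data from this study.

# Sketch of the paired test and interobserver agreement; all score values are placeholders.
import numpy as np
from scipy.stats import wilcoxon
from sklearn.metrics import cohen_kappa_score

full_ir = np.array([5, 4, 4, 3, 5, 4])     # placeholder overall-quality scores (full IR)
hybrid_ir = np.array([4, 4, 3, 3, 4, 3])   # placeholder scores (hybrid IR), same patients

stat, p = wilcoxon(full_ir, hybrid_ir)                       # paired, non-parametric comparison
kappa = cohen_kappa_score([3, 2, 3, 3], [3, 2, 2, 3])        # placeholder reader-1 vs reader-2 ratings
print(p, kappa)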
Radiation exposure
Indicators of radiation exposure are summarized in Table 3. The average tube current was 52.7±15.6 mA. A low radiation dose was achieved by the proposed protocol, as indicated by the average effective dose of 0.37±0.23 mSv and average SSDE of 1.36±0.69 mGy.
Subjective image quality
Evaluation of subjective image quality is summarized in Table 4. Interobserver agreement for image quality according to Cohen's kappa statistic was very good or excellent. For both algorithms, the score for each structure and for beam-hardening artifact was greater than 2 on the 3-point scale and the overall quality score was greater than 3 on the 5-point scale, meaning that subjective image quality was diagnostic. For most gross structures, the evaluation scores were higher with full iterative reconstruction than with hybrid iterative reconstruction. There were significant differences in the scores for the atrial septum, left atrium, left ventricle, right coronary artery and left main trunk. Overall scores were significantly higher with full iterative reconstruction than with hybrid iterative reconstruction. There was no significant difference between full iterative reconstruction and hybrid iterative reconstruction with respect to the presence of beam-hardening artifacts from the contrast-enhancement material. Representative images reconstructed with both algorithms are shown in Figs. 3, 4 and 5.
Objective image quality
Evaluation of objective image quality is summarized in Table 5. Image noise was significantly lower in full iterative reconstruction, while SNR and CNR were significantly higher in full iterative reconstruction.
Discussion
Because of increasing concern about exposure to ionizing radiation, it is necessary to develop low-dose cardiac CT scanning procedures, particularly for use in children.
Unlike hybrid iterative reconstruction, in which noise is reduced independently in the sinogram and image spaces, full iterative reconstruction produces fewer streak artifacts and improved spatial resolution by applying forward projection that jointly uses the fidelity, optics, system, cone-beam and statistical noise models. Three major CT vendors have recently introduced advanced model-based full iterative reconstruction algorithms: Veo (GE Healthcare), Iterative Model Reconstruction (IMR; Philips Healthcare), and forward projected model-based iterative reconstruction solution (FIRST; Toshiba Medical Systems). Among these three full iterative reconstruction algorithms, FIRST is the only one that can be combined with an electrocardiogram-gated scan. Another feature of FIRST compared with the other iterative reconstruction algorithms (Veo and IMR) is its regularization process. Using the forward-projected data, further noise reduction is achieved through an anatomical model via the regularization process and adaptive iteration. The regularization process is optimized for specific organs (e.g., bone, heart, lung and abdomen) to reduce image noise. This series of forward-projection and regularization processes results in improved noise reduction, spatial resolution and density resolution compared with hybrid iterative reconstruction.
The results of this study revealed that low-dose CT with a 320-row detector and full iterative reconstruction has good diagnostic quality for gross structures. The present 320-row cardiac CT achieved dose levels close to the diagnostic reference levels for pediatric chest radiographs: internationally reported effective doses for conventional chest radiography range from 0.01 mSv to 0.299 mSv. Moreover, for most structures the evaluation scores with full iterative reconstruction were higher than those with hybrid iterative reconstruction, particularly for the atrial septum, left atrium, left ventricle, right coronary artery and left main trunk. As shown in Fig. 1, contrast resolution between the cardiac and coronary structures and the lumen was significantly improved with full iterative reconstruction compared with hybrid iterative reconstruction, in accordance with the hypothesis of this study. According to a previous study that compared image quality between full iterative reconstruction and hybrid iterative reconstruction in adult cardiac CT, the SNR was significantly higher with full iterative reconstruction. The objective image analysis in the present study showed that image noise was significantly higher with hybrid iterative reconstruction, while SNR and CNR were significantly higher with full iterative reconstruction. Further experimental studies are necessary to confirm this hypothesis.
For the evaluation of image quality there was a significant difference between full iterative reconstruction and hybrid iterative reconstruction for the left atrium and left ventricle, whereas there was no significant difference for the right atrium and right ventricle. The right atrium, particularly in the early phase, was difficult to evaluate with both algorithms because of turbulent flow caused by the mixture of highly concentrated contrast-enhancement material. In the right ventricle, the contrast material was inhomogeneously distributed because of the thick trabeculae and lesser motion; therefore the right ventricle was also difficult to evaluate with both algorithms.
There were several limitations to this study. First, the diagnostic accuracies of hybrid iterative reconstruction and full iterative reconstruction were not assessed against conventional cardiac angiography. Second, only the "cardiac" mode was applied; the feasibility of the "cardiac-sharp" mode should be validated in further studies. Third, the subjective image analysis was not fully blinded: because of the study design, readers may have been able to recognize the reconstruction algorithm, which raises doubts about the validity of those results.
Conclusion
Compared with hybrid iterative reconstruction, full iterative reconstruction provides better depiction of cardiac structures on 320-row pediatric cardiac CT. Image quality of low-dose cardiac CT reconstructed with electrocardiogram-gated model-based full iterative reconstruction is clinically acceptable for children.
package com.intellij.openapi.diff.impl.highlighting;
import com.intellij.openapi.diff.ex.DiffFragment;
import com.intellij.util.Assertion;
import junit.framework.TestCase;
public class LineBlockDividesTest extends TestCase {
private final Assertion CHECK = new Assertion(new FragmentStringConvertion());
@Override
protected void setUp() throws Exception {
super.setUp();
CHECK.setEquality(new FragmentEquality());
}
public void testSingleSide() {
DiffFragment abc_ = new DiffFragment("abc", null);
DiffFragment xyzL_ = new DiffFragment("xyz\n", null);
DiffFragment x_y = new DiffFragment("x", "y");
DiffFragment a_b = new DiffFragment("a", "b");
DiffFragment xyzL_L = new DiffFragment("xyz\n", "\n");
DiffFragment abcL_ = new DiffFragment("abc\n", null);
DiffFragment[][] lineBlocks = LineBlockDivider.SINGLE_SIDE.divide(new DiffFragment[]{
abc_, xyzL_,
x_y, a_b, xyzL_L,
abcL_});
CHECK.compareAll(new DiffFragment[][]{
new DiffFragment[]{abc_, xyzL_}, new DiffFragment[]{x_y, a_b, xyzL_L}, new DiffFragment[]{abcL_}},
lineBlocks);
}
}
|
/// Pull actions can be configured to get configuration and secrets from integrations on demand.
pub fn new() -> PatchedGitHubPull {
PatchedGitHubPull {
url: None,
id: None,
name: None,
description: None,
latest_task: None,
created_at: None,
modified_at: None,
create_environments: None,
create_projects: None,
dry_run: None,
mapped_values: None,
mode: None,
}
} |
package main
import (
"encoding/json"
"fmt"
"os/exec"
"time"
b64 "encoding/base64"
"github.com/golang/glog"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
kubeinformers "k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
clusterv1alpha1 "github.com/infrabox/infrabox/src/services/gcp/pkg/apis/gcp/v1alpha1"
clientset "github.com/infrabox/infrabox/src/services/gcp/pkg/client/clientset/versioned"
gkescheme "github.com/infrabox/infrabox/src/services/gcp/pkg/client/clientset/versioned/scheme"
informers "github.com/infrabox/infrabox/src/services/gcp/pkg/client/informers/externalversions"
listers "github.com/infrabox/infrabox/src/services/gcp/pkg/client/listers/gcp/v1alpha1"
)
const controllerAgentName = "infrabox-service-gcp"
type Controller struct {
kubeclientset kubernetes.Interface
gkeclientset clientset.Interface
clusterLister listers.GKEClusterLister
clustersSynced cache.InformerSynced
secretsLister corelisters.SecretLister
secretsSynced cache.InformerSynced
workqueue workqueue.RateLimitingInterface
recorder record.EventRecorder
}
// NewController returns a new GKE cluster controller
func NewController(
kubeclientset kubernetes.Interface,
gkeclientset clientset.Interface,
kubeInformerFactory kubeinformers.SharedInformerFactory,
gkeInformerFactory informers.SharedInformerFactory) *Controller {
clusterInformer := gkeInformerFactory.Gcp().V1alpha1().GKEClusters()
secretsInformer := kubeInformerFactory.Core().V1().Secrets()
// Create event broadcaster
// Add sample-controller types to the default Kubernetes Scheme so Events can be
// logged for sample-controller types.
gkescheme.AddToScheme(scheme.Scheme)
glog.V(4).Info("Creating event broadcaster")
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: kubeclientset.CoreV1().Events("")})
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerAgentName})
controller := &Controller{
kubeclientset: kubeclientset,
gkeclientset: gkeclientset,
clusterLister: clusterInformer.Lister(),
clustersSynced: clusterInformer.Informer().HasSynced,
secretsLister: secretsInformer.Lister(),
secretsSynced: secretsInformer.Informer().HasSynced,
workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "GKEClusters"),
recorder: recorder,
}
glog.Info("Setting up event handlers")
clusterInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: controller.enqueueCluster,
UpdateFunc: func(old, new interface{}) {
controller.enqueueCluster(new)
},
DeleteFunc: func(old interface{}) {},
})
return controller
}
func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error {
defer runtime.HandleCrash()
defer c.workqueue.ShutDown()
glog.Info("Starting Cluster controller")
glog.Info("Waiting for informer caches to sync")
if ok := cache.WaitForCacheSync(stopCh, c.clustersSynced, c.secretsSynced); !ok {
return fmt.Errorf("failed to wait for caches to sync")
}
glog.Info("Starting workers")
for i := 0; i < threadiness; i++ {
go wait.Until(c.runWorker, time.Second, stopCh)
}
glog.Info("Started workers")
<-stopCh
glog.Info("Shutting down workers")
return nil
}
func (c *Controller) runWorker() {
for c.processNextWorkItem() {
}
}
func (c *Controller) processNextWorkItem() bool {
obj, shutdown := c.workqueue.Get()
if shutdown {
return false
}
err := func(obj interface{}) error {
defer c.workqueue.Done(obj)
var key string
var ok bool
if key, ok = obj.(string); !ok {
c.workqueue.Forget(obj)
runtime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj))
return nil
}
if err := c.syncHandler(key); err != nil {
return fmt.Errorf("%s: error syncing: %s", key, err.Error())
}
c.workqueue.Forget(obj)
return nil
}(obj)
if err != nil {
runtime.HandleError(err)
return true
}
return true
}
type MasterAuth struct {
ClientCertificate string
ClientKey string
ClusterCaCertificate string
Username string
Password string
}
type RemoteCluster struct {
Name string
Status string
Endpoint string
MasterAuth MasterAuth
}
func (c *Controller) updateClusterStatus(cluster *clusterv1alpha1.GKECluster, gke *RemoteCluster) error {
oldStatus := cluster.Status.Status
switch gke.Status {
case "RUNNING":
cluster.Status.Status = "ready"
case "PROVISIONING":
cluster.Status.Status = "pending"
default:
cluster.Status.Status = "error"
}
if cluster.Status.Status == oldStatus {
return nil
}
_, err := c.gkeclientset.GcpV1alpha1().GKEClusters(cluster.Namespace).Update(cluster)
return err
}
func (c *Controller) getRemoteClusters() ([]RemoteCluster, error) {
cmd := exec.Command("gcloud", "container", "clusters", "list", "--format", "json")
out, err := cmd.CombinedOutput()
if err != nil {
runtime.HandleError(fmt.Errorf("Could not list clusters: %s", err.Error()))
return nil, err
}
var gkeclusters []RemoteCluster
err = json.Unmarshal(out, &gkeclusters)
if err != nil {
runtime.HandleError(fmt.Errorf("Could not parse cluster list: %s", err.Error()))
return nil, err
}
return gkeclusters, nil
}
func (c *Controller) getRemoteCluster(name string) (*RemoteCluster, error) {
cmd := exec.Command("gcloud", "container", "clusters", "list",
"--filter", "name=ib-"+name, "--format", "json")
out, err := cmd.CombinedOutput()
if err != nil {
runtime.HandleError(fmt.Errorf("Could not list clusters: %s", err.Error()))
glog.Warning(string(out))
return nil, err
}
var gkeclusters []RemoteCluster
err = json.Unmarshal(out, &gkeclusters)
if err != nil {
runtime.HandleError(fmt.Errorf("Could not parse cluster list: %s", err.Error()))
glog.Warning(string(out))
return nil, err
}
if len(gkeclusters) == 0 {
return nil, nil
}
return &gkeclusters[0], nil
}
func newSecret(cluster *clusterv1alpha1.GKECluster, gke *RemoteCluster) *corev1.Secret {
caCrt, _ := b64.StdEncoding.DecodeString(gke.MasterAuth.ClusterCaCertificate)
clientKey, _ := b64.StdEncoding.DecodeString(gke.MasterAuth.ClientKey)
clientCrt, _ := b64.StdEncoding.DecodeString(gke.MasterAuth.ClientCertificate)
secretName := cluster.ObjectMeta.Labels["service.infrabox.net/secret-name"]
return &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: secretName,
Namespace: cluster.Namespace,
OwnerReferences: []metav1.OwnerReference{
*metav1.NewControllerRef(cluster, schema.GroupVersionKind{
Group: clusterv1alpha1.SchemeGroupVersion.Group,
Version: clusterv1alpha1.SchemeGroupVersion.Version,
Kind: "Cluster",
}),
},
},
Type: "Opaque",
Data: map[string][]byte{
"ca.crt": []byte(caCrt),
"client.key": []byte(clientKey),
"client.crt": []byte(clientCrt),
"username": []byte(gke.MasterAuth.Username),
"password": []byte(gke.MasterAuth.Password),
"endpoint": []byte("https://" + gke.Endpoint),
},
}
}
func (c *Controller) deleteSecret(cluster *clusterv1alpha1.GKECluster) (bool, error) {
secretName := cluster.ObjectMeta.Labels["service.infrabox.net/secret-name"]
secret, err := c.secretsLister.Secrets(cluster.Namespace).Get(secretName)
if err != nil {
if errors.IsNotFound(err) {
return true, nil
}
return false, err
}
// At this point the secret still exists (Get succeeded), so fall through and delete it.
glog.Infof("%s/%s: Deleting secret for cluster credentials", cluster.Namespace, cluster.Name)
err = c.kubeclientset.CoreV1().Secrets(cluster.Namespace).Delete(secretName, metav1.NewDeleteOptions(0))
if err != nil {
runtime.HandleError(fmt.Errorf("%s/%s: Failed to delete secret: %s", cluster.Namespace, cluster.Name, err.Error()))
return false, err
}
return true, nil
}
func (c *Controller) createSecret(cluster *clusterv1alpha1.GKECluster, gkecluster *RemoteCluster) error {
secretName := cluster.ObjectMeta.Labels["service.infrabox.net/secret-name"]
secret, err := c.secretsLister.Secrets(cluster.Namespace).Get(secretName)
if err != nil {
if !errors.IsNotFound(err) {
return err
}
}
if secret != nil {
return nil
}
// Secret does not yet exist
glog.Infof("%s/%s: Creating secret for cluster credentials", cluster.Namespace, cluster.Name)
secret, err = c.kubeclientset.CoreV1().Secrets(cluster.Namespace).Create(newSecret(cluster, gkecluster))
if err != nil {
runtime.HandleError(fmt.Errorf("%s/%s: Failed to create secret: %s", cluster.Namespace, cluster.Name, err.Error()))
return err
}
return nil
}
func (c *Controller) deleteGKECluster(cluster *clusterv1alpha1.GKECluster) (bool, error) {
// Get the GKE Cluster
gkecluster, err := c.getRemoteCluster(cluster.Name)
if err != nil {
if !errors.IsNotFound(err) {
runtime.HandleError(fmt.Errorf("%s/%s: Could not get GKE Cluster", cluster.Namespace, cluster.Name))
return false, err
}
}
if gkecluster == nil {
return true, nil
}
// Cluster still exists, delete it
glog.Infof("%s/%s: deleting gke cluster", cluster.Namespace, cluster.Name)
cmd := exec.Command("gcloud", "-q", "container", "clusters", "delete", "ib-"+cluster.Name, "--async", "--zone", "us-east1-b")
out, err := cmd.CombinedOutput()
if err != nil {
runtime.HandleError(fmt.Errorf("%s/%s: Failed to delete cluster", cluster.Namespace, cluster.Name))
glog.Warning(string(out))
return false, err
}
return false, nil
}
func (c *Controller) deleteCluster(cluster *clusterv1alpha1.GKECluster) error {
// Update status to pending
if cluster.Status.Status != "pending" {
cluster.Status.Status = "pending"
cluster, err := c.gkeclientset.GcpV1alpha1().GKEClusters(cluster.Namespace).Update(cluster)
if err != nil {
runtime.HandleError(fmt.Errorf("%s/%s: Failed to update status", cluster.Namespace, cluster.Name))
return err
}
}
// Delete GKE Cluster
deleted, err := c.deleteGKECluster(cluster)
if err != nil {
runtime.HandleError(fmt.Errorf("%s/%s: Failed to delete GKE cluster", cluster.Namespace, cluster.Name))
return err
}
if !deleted {
return nil
}
// Delete Secret
deleted, err = c.deleteSecret(cluster)
if err != nil {
runtime.HandleError(fmt.Errorf("%s/%s: Failed to delete secret", cluster.Namespace, cluster.Name))
return err
}
if !deleted {
return nil
}
// Everything deleted, remove finalizers
glog.Infof("%s/%s: removing finalizers", cluster.Namespace, cluster.Name)
cluster.SetFinalizers([]string{})
_, err = c.gkeclientset.GcpV1alpha1().GKEClusters(cluster.Namespace).Update(cluster)
if err != nil {
runtime.HandleError(fmt.Errorf("%s/%s: Failed to set finalizers", cluster.Namespace, cluster.Name))
return err
}
/*
glog.Infof("%s/%s: Finally deleting cluster", cluster.Namespace, cluster.Name)
err = c.gkeclientset.GcpV1alpha1().GKEClusters(cluster.Namespace).Delete(cluster.Name, metav1.NewDeleteOptions(0))
if err != nil {
runtime.HandleError(fmt.Errorf("%s/%s: Failed to delete cluster", cluster.Namespace, cluster.Name))
return err
}
*/
return nil
}
func (c *Controller) syncHandler(key string) error {
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
runtime.HandleError(fmt.Errorf("invalid resource key: %s", key))
return nil
}
cluster, err := c.clusterLister.GKEClusters(namespace).Get(name)
if err != nil {
if errors.IsNotFound(err) {
runtime.HandleError(fmt.Errorf("%s: Cluster in work queue no longer exists", key))
return nil
}
return err
}
err = c.syncHandlerImpl(key, cluster.DeepCopy())
if err != nil {
cluster = cluster.DeepCopy()
cluster.Status.Status = "error"
cluster.Status.Message = err.Error()
_, err := c.gkeclientset.GcpV1alpha1().GKEClusters(cluster.Namespace).Update(cluster)
if err != nil {
runtime.HandleError(fmt.Errorf("%s: Failed to update status", key))
return err
}
}
return nil
}
func (c *Controller) syncHandlerImpl(key string, cluster *clusterv1alpha1.GKECluster) error {
glog.Infof("%s: Start sync", key)
// Check wether we should delete the cluster
delTimestamp := cluster.GetDeletionTimestamp()
if delTimestamp != nil {
return c.deleteCluster(cluster)
}
if cluster.Status.Status == "error" {
glog.Infof("%s: Cluster in error state, skipping", key)
return nil
}
// Get the GKE Cluster
gkecluster, err := c.getRemoteCluster(cluster.Name)
if err != nil {
if !errors.IsNotFound(err) {
runtime.HandleError(fmt.Errorf("%s: Could not get GKE Cluster", key))
return err
}
}
if gkecluster == nil {
glog.Infof("%s: Cluster does not exist yet, creating one", key)
// First set finalizers so we don't forget to delete it later on
cluster.SetFinalizers([]string{"gcp.service.infrabox.net"})
cluster, err := c.gkeclientset.GcpV1alpha1().GKEClusters(cluster.Namespace).Update(cluster)
if err != nil {
runtime.HandleError(fmt.Errorf("%s: Failed to set finalizers", key))
return err
}
name := "ib-" + cluster.Name
args := []string{"container", "clusters",
"create", name, "--async", "--zone", "us-east1-b", "--enable-autorepair"}
if cluster.Spec.DiskSize != "" {
args = append(args, "--disk-size")
args = append(args, cluster.Spec.DiskSize)
}
if cluster.Spec.MachineType != "" {
args = append(args, "--machine-type")
args = append(args, cluster.Spec.MachineType)
}
if cluster.Spec.EnableNetworkPolicy == "true" {
args = append(args, "--enable-network-policy")
}
if cluster.Spec.NumNodes != "" {
args = append(args, "--num-nodes")
args = append(args, cluster.Spec.NumNodes)
}
if cluster.Spec.Preemptible == "true" {
args = append(args, "--preemptible")
}
if cluster.Spec.EnableAutoscaling == "true" {
args = append(args, "--enable-autoscaling")
if cluster.Spec.MaxNodes != "" {
args = append(args, "--max-nodes")
args = append(args, cluster.Spec.MaxNodes)
}
if cluster.Spec.MinNodes != "" {
args = append(args, "--min-nodes")
args = append(args, cluster.Spec.MinNodes)
}
}
cmd := exec.Command("gcloud", args...)
out, err := cmd.CombinedOutput()
if err != nil {
runtime.HandleError(fmt.Errorf("%s: Failed to create gke cluster", key))
glog.Error(string(out))
return err
}
glog.Infof("%s: Cluster creation started", key)
gkecluster, err := c.getRemoteCluster(cluster.Name)
if err != nil {
runtime.HandleError(fmt.Errorf("%s: Could not get GKE Cluster", key))
return err
}
err = c.updateClusterStatus(cluster, gkecluster)
if err != nil {
runtime.HandleError(fmt.Errorf("%s: Failed to update status", key))
return err
}
} else {
if gkecluster.Status == "RUNNING" {
glog.Infof("%s: Cluster is ready", key)
err = c.createSecret(cluster, gkecluster)
if err != nil {
runtime.HandleError(fmt.Errorf("%s: Failed to create secret: %s", key, err.Error()))
return err
}
}
err = c.updateClusterStatus(cluster, gkecluster)
if err != nil {
runtime.HandleError(fmt.Errorf("%s: Failed to update status", key))
return err
}
}
glog.Infof("%s: Finished sync", key)
return nil
}
func (c *Controller) enqueueCluster(obj interface{}) {
var key string
var err error
if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil {
runtime.HandleError(err)
return
}
c.workqueue.AddRateLimited(key)
}
|
package acceptance_test
import (
"os"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gexec"
"github.com/sclevine/agouti"
. "github.com/sclevine/agouti/matchers"
"github.com/concourse/atc/db"
"github.com/concourse/atc/postgresrunner"
"github.com/tedsuo/ifrit"
"testing"
"time"
)
func TestAcceptance(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Acceptance Suite")
}
var (
atcBin string
certTmpDir string
postgresRunner postgresrunner.Runner
dbConn db.Conn
dbProcess ifrit.Process
sqlDB *db.SQLDB
agoutiDriver *agouti.WebDriver
)
var _ = SynchronizedBeforeSuite(func() []byte {
atcBin, err := gexec.Build("github.com/concourse/atc/cmd/atc")
Expect(err).NotTo(HaveOccurred())
return []byte(atcBin)
}, func(b []byte) {
atcBin = string(b)
SetDefaultEventuallyTimeout(10 * time.Second)
SetDefaultEventuallyPollingInterval(100 * time.Millisecond)
postgresRunner = postgresrunner.Runner{
Port: 5432 + GinkgoParallelNode(),
}
dbProcess = ifrit.Invoke(postgresRunner)
postgresRunner.CreateTestDB()
if os.Getenv("FORCE_SELENIUM") == "true" {
agoutiDriver = agouti.Selenium(agouti.Browser("firefox"))
} else {
agoutiDriver = agouti.ChromeDriver()
}
Expect(agoutiDriver.Start()).To(Succeed())
})
var _ = SynchronizedAfterSuite(func() {
Expect(agoutiDriver.Stop()).To(Succeed())
dbProcess.Signal(os.Interrupt)
Eventually(dbProcess.Wait(), 10*time.Second).Should(Receive())
}, func() {
err := os.RemoveAll(certTmpDir)
Expect(err).NotTo(HaveOccurred())
})
func Screenshot(page *agouti.Page) {
page.Screenshot("/tmp/screenshot.png")
}
func Login(page *agouti.Page, homePage string) {
Expect(page.Navigate(homePage + "/teams/main/login")).To(Succeed())
Eventually(page.FindByName("username")).Should(BeFound())
Expect(page.FindByName("username").Fill("admin")).To(Succeed())
Expect(page.FindByName("password").Fill("password")).To(Succeed())
Expect(page.FindByButton("login").Click()).To(Succeed())
}
|
/**
 * Decodes four bytes (big-endian) starting at the given index into an int
 * and wraps the value in a MessageMetadata instance.
 * @param bin the raw byte array
 * @param startIndex index of the first of the four bytes to decode
 * @return the decoded MessageMetadata
 */
public static MessageMetadata byteToMessageMetaData(byte[] bin, int startIndex){
    int r = 0;
    // Mask each byte to 0xFF before shifting to avoid sign extension of negative bytes.
    r += (bin[startIndex    ] & 0xFF) << 24;
    r += (bin[startIndex + 1] & 0xFF) << 16;
    r += (bin[startIndex + 2] & 0xFF) << 8;
    r +=  bin[startIndex + 3] & 0xFF;
    return new MessageMetadata(r);
}
/**
* Parses a prodom.ipr file and creates Signature / Model objects appropriately.
*
* @author Matthew Fraser, EMBL-EBI, InterPro
* @version $Id$
* @since 1.0-SNAPSHOT
*/
public class ProDomModelParser extends AbstractModelFileParser {
/*
* Example prodom.ipr input file:
*
* >IGJ_RABIT#PD021296#1#136 | 136 | pd_PD021296;sp_IGJ_RABIT_P23108; | (6) J IMMUNOGLOBULIN CHAIN GLYCOPROTEIN SEQUENCING DIRECT IGJ_PREDICTED ACID PYRROLIDONE CARBOXYLIC
* EDESTVLVDNKCQCVRITSRIIRDPDNPSEDIVERNIRIIVPLNTRENISDPTSPLRTE
* FKYNLANLCKKCDPTEIELDNQVFTASQSNICPDDDYSETCYTYDRNKCYTTLVPITHR
* GGTRMVKATLTPDSCYPD
* >IGJ_HUMAN#PD021296#2#137 | 136 | pd_PD021296;sp_IGJ_HUMAN_P01591; | (6) J IMMUNOGLOBULIN CHAIN GLYCOPROTEIN SEQUENCING DIRECT IGJ_PREDICTED ACID PYRROLIDONE CARBOXYLIC
* EDERIVLVDNKCKCARITSRIIRSSEDPNEDIVERNIRIIVPLNNRENISDPTSPLRTR
* FVYHLSDLCKKCDPTEVELDNQIVTATQSNICDEDSATETCYTYDRNKCYTAVVPLVYG
* GETKMVETALTPDACYPD
*
*/
private static final Logger LOGGER = Logger.getLogger(ProDomModelParser.class.getName());
private static final Pattern LINE_PATTERN = Pattern.compile("^>");
private static final Pattern ACCESSION_PATTERN = Pattern.compile("^pd_PD\\d+;sp_");
private static final Pattern DESCRIPTION_PATTERN = Pattern.compile("^\\(\\d+\\)\\s+");
@Transactional
public SignatureLibraryRelease parse() throws IOException {
LOGGER.debug("Starting to parse prodom.ipr file.");
SignatureLibraryRelease release = new SignatureLibraryRelease(library, releaseVersion);
for (Resource modelFile : modelFiles) {
BufferedReader reader = null;
try {
StringBuffer modelBuffer = new StringBuffer();
reader = new BufferedReader(new InputStreamReader(modelFile.getInputStream()));
int lineNumber = 0;
String line;
while ((line = reader.readLine()) != null) {
// Count every line so warnings report the correct line number even when debug logging is disabled.
lineNumber++;
if (LOGGER.isDebugEnabled() && lineNumber % 10000 == 0) {
LOGGER.debug("Parsed " + lineNumber + " lines of the prodom.ipr file.");
LOGGER.debug("Parsed " + release.getSignatures().size() + " signatures.");
}
Matcher data = LINE_PATTERN.matcher(line);
if (data.find()) {
String accession = null;
String description = null;
// Load the model line by line into a temporary buffer.
line = line.trim();
modelBuffer.append(line);
modelBuffer.append('\n');
// Now parse the model line
String[] values = line.split("\\|");
int i = 0;
while (i < values.length) {
switch (i) {
case 2:
// Accession
// Example: PD021296
String text = values[2]; // Example: pd_PD021296;sp_IGJ_RABIT_P23108;
if (text == null) {
LOGGER.warn("ProDom model parser could not extract the accession from NULL text "
+ " on line number " + lineNumber + " - so this can't be added to the database");
}
else {
text = text.trim();
Matcher accMatcher = ACCESSION_PATTERN.matcher(text);
if (accMatcher.find()) {
accession = text.substring(3, text.indexOf(';'));
}
else {
LOGGER.warn("ProDom model parser could not extract the accession from this text: "
+ text + " on line number " + lineNumber + " - so this can't be added to the database");
}
}
break;
case 3:
// Description
// Example: J IMMUNOGLOBULIN CHAIN GLYCOPROTEIN ...
String text2 = values[3]; // Example: (6) J IMMUNOGLOBULIN CHAIN GLYCOPROTEIN ...
if (text2 == null) {
LOGGER.warn("ProDom model parser could not extract the description from NULL text "
+ " on line number " + lineNumber + " - so this can't be added to the database");
}
else {
text2 = text2.trim();
Matcher descMatcher = DESCRIPTION_PATTERN.matcher(text2);
int index = text2.indexOf(')') + 1;
if (descMatcher.find() && index < text2.length()) {
description = text2.substring(index);
description = description.trim();
}
else {
LOGGER.warn("ProDom model parser could not extract the description from this text: "
+ text2 + " on line number " + lineNumber + " - so this can't be added to the database");
}
}
break;
}
i++;
}
// Now create the signature
if (accession != null) {
release.addSignature(createSignature(accession, null, description, release, modelBuffer));
}
}
}
}
finally {
if (reader != null) {
reader.close();
}
}
}
return release;
}
protected Signature createSignature(String accession, String name, String description, SignatureLibraryRelease release, StringBuffer modelBuffer) {
Model model = new Model(accession, name, description);
modelBuffer.delete(0, modelBuffer.length());
return new Signature(accession, name, null, description, null, release, Collections.singleton(model));
}
} |
import os

def install_config(src: str, dest: str, mode: str = "0664", force: bool = False):
    """Install a config file unless the destination already exists (or force is set)."""
    if os.path.exists(dest) and not force:
        return
    # -b backs up an existing file; owner root, group wheel, requested mode.
    args = ["-b", "-o", "root", "-g", "wheel", "-m", mode, src, dest]
    # _install is assumed to be a module-level wrapper around the system install(1) command.
    _install(*args)
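A hypothetical call might look like the following; the paths and mode are illustrative, and the function only runs the underlying install command when the destination is missing or force is set.

# Hypothetical usage; source path, destination path and mode are illustrative.
install_config("configs/sshd_config", "/usr/local/etc/ssh/sshd_config", mode="0600", force=True)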