content
stringlengths
7
2.61M
Dermatological and epidemiological profile of patients with albinism in Togo in 2019: results of two consultation campaigns. BACKGROUND People with albinism (PWA) are targets of prejudice and social exclusion and have limited access to specialized medical care and resources. Our study aimed to describe the epidemiological and dermatological profile of PWA in Togo. METHOD We carried out a cross-sectional study of 294 medical records of PWA systematically examined during two mobile skin care clinics in 2019. RESULTS The mean age of the patients was 22 ± 16.5 years, and the gender ratio (M/F) was 1. A family history of albinism was reported in 27.9% of cases, and consanguinity between the parents was found in 24.1% of PWA. Dermatological lesions on physical examination were present in 95.2% of PWA. These lesions were elastosis/wrinkles (82.9%), ephelides (79.6%), actinic keratoses (60.0%), actinic cheilitis (50.0%), and cutaneous carcinomas (11.8%). The mean age of PWA with skin carcinomas was 38.6 years. Fifty-four cases of cutaneous carcinomas had been diagnosed (31 cases of basal cell carcinoma, 21 cases of squamous cell carcinoma, and 2 cases of Bowen's disease) and were located mainly in the cephalic region (61.1%) and upper limbs (27.8%). Cryotherapy and/or excisional biopsies were the treatments for premalignant and malignant lesions. All PWA received education on sun protection. CONCLUSIONS This study shows the basic picture of dermatological characteristics of PWA with a high frequency of photo-induced lesions and skin cancers. The popularization and respect of photoprotection measures and regular skin examination of these PWA for early detection and management of lesions will reduce their morbidity and mortality.
def compare_random_variable_cdfs(self, random_variable_1, random_variable_2, variable="x", plot_dir=".", plot_suffix="CDFscompare", show_figure=True, save_figure=False, variable_names=None, x_limits=None, labels=None):
    """Plot the marginal CDFs of two random variables on a shared axis, one
    figure per dimension.

    Parameters
    ----------
    random_variable_1, random_variable_2 : objects exposing ``dim`` and
        ``get_plot_cdfs()`` returning ``(grids, cdfs)`` arrays of shape
        (n_points, dim).
    variable : str, base name used for axis labels and file names.
    plot_dir : str, directory for saved figures (used when ``save_figure``).
    plot_suffix : str, file-name stem for saved figures.
    show_figure : bool, call ``plt.show()`` for each figure.
    save_figure : bool, save each figure as ``<plot_dir>/<plot_suffix>_<name>.pdf``.
    variable_names : list[str] | None, one name per dimension; generated from
        ``variable`` when None.
    x_limits : sequence of (lo, hi) per dimension, or None for data limits.
    labels : [str, str] | None, legend labels for the two variables.

    Raises
    ------
    ValueError
        If ``variable_names`` is given but its length differs from the
        number of dimensions of ``random_variable_1``.
    """
    (rv1_grids, rv1_cdfs) = random_variable_1.get_plot_cdfs()
    (rv2_grids, rv2_cdfs) = random_variable_2.get_plot_cdfs()

    plot_name = os.path.join(plot_dir, plot_suffix) if save_figure else None

    if variable_names is not None:
        # Validate against the variable actually being plotted; the original
        # checked an unrelated attribute (self._target._dim).
        if len(variable_names) != random_variable_1.dim:
            raise ValueError("Wrong number of variable names provided")
    elif random_variable_1.dim == 1:
        variable_names = [variable]
    else:
        variable_names = [variable + "_" + str(i + 1)
                          for i in range(random_variable_1.dim)]

    # Hoisted out of the loop: the default must not be re-evaluated per
    # dimension, and the parameter itself is left untouched.
    if labels is None:
        labels = ["random_variable_1", "random_variable_2"]

    axis_font = {'fontname': 'Arial', 'size': 26, 'weight': 'normal'}
    label_font = 'Arial'
    label_size = 20
    legend_font = 22

    for i in range(random_variable_1.dim):
        var_name = variable_names[i]
        if plot_name is not None:
            # Strip '$' so LaTeX-styled names yield a valid file name
            # (Python 3 replacement for str.translate(None, "$")).
            plot_name_ = (plot_name + "_" + var_name + ".pdf").replace("$", "")
        else:
            plot_name_ = None
        y_label = r'$F($' + var_name + r'$)$'

        x1 = rv1_grids[:, i]
        cdf1 = rv1_cdfs[:, i]
        x2 = rv2_grids[:, i]
        cdf2 = rv2_cdfs[:, i]

        fig, ax = plt.subplots(1)
        ax.plot(x1, cdf1, 'r--', linewidth=4.5, label=labels[0])
        ax.plot(x2, cdf2, 'b-', linewidth=2.5, label=labels[1])
        ax.legend(loc='best', prop={'size': legend_font})
        # Explicit limits override the data-driven default when provided.
        if x_limits is not None:
            ax.axis([x_limits[i][0], x_limits[i][1], 0, 1.1])
        else:
            ax.axis([min(x1), max(x1), 0, 1.1])
        ax.set_xlabel(var_name, **axis_font)
        ax.set_ylabel(y_label, **axis_font)
        for label in (ax.get_xticklabels() + ax.get_yticklabels()):
            label.set_fontname(label_font)
            label.set_fontsize(label_size)
        plt.tight_layout()
        if plot_name_ is not None:
            plt.savefig(plot_name_)
        if show_figure:
            plt.show()
<filename>src/GaIA/pkgs/postfix/postfix-3.0.2/src/postconf/postconf_print.c
/*++
/* NAME
/*	postconf_print 3
/* SUMMARY
/*	basic line printing support
/* SYNOPSIS
/*	#include <postconf.h>
/*
/*	void	pcf_print_line(fp, mode, const char *fmt, ...)
/*	VSTREAM	*fp;
/*	int	mode;
/*	const char *fmt;
/* DESCRIPTION
/*	pcf_print_line() formats text, normalized whitespace, and
/*	optionally folds long lines.
/*
/*	Arguments:
/* .IP fp
/*	Output stream.
/* .IP mode
/*	Bit-wise OR of zero or more of the following (other flags
/*	are ignored):
/* .RS
/* .IP PCF_FOLD_LINE
/*	Fold long lines.
/* .RE
/* .IP fmt
/*	Format string.
/* DIAGNOSTICS
/*	Problems are reported to the standard error stream.
/* LICENSE
/* .ad
/* .fi
/*	The Secure Mailer license must be distributed with this software.
/* AUTHOR(S)
/*	<NAME>
/*	<NAME> Research
/*	P.O. Box 704
/*	Yorktown Heights, NY 10598, USA
/*--*/

/* System library. */

#include <sys_defs.h>
#include <string.h>
#include <stdarg.h>

/* Utility library. */

#include <msg.h>
#include <vstream.h>
#include <vstring.h>

/* Application-specific. */

#include <postconf.h>

/* SLMs. */

#define STR(x) vstring_str(x)

/* pcf_print_line - show line possibly folded, and with normalized whitespace */

void pcf_print_line(VSTREAM *fp, int mode, const char *fmt,...)
{
    va_list ap;
    /* Reusable formatting buffer; allocated once, kept for the process
     * lifetime. Makes this routine non-reentrant (single-threaded tool). */
    static VSTRING *buf = 0;
    char *start;                        /* start of the current word */
    char *next;                         /* start of the following word */
    int line_len = 0;                   /* columns emitted on current line */
    int word_len;                       /* length of the current word */

    /*
     * One-off initialization.
     */
    if (buf == 0)
        buf = vstring_alloc(100);

    /*
     * Format the text.
     */
    va_start(ap, fmt);
    vstring_vsprintf(buf, fmt, ap);
    va_end(ap);

    /*
     * Normalize the whitespace. We don't use the line_wrap() routine because
     * 1) that function does not normalize whitespace between words and 2) we
     * want to normalize whitespace even when not wrapping lines.
     *
     * XXX Some parameters preserve whitespace: for example, smtpd_banner and
     * smtpd_reject_footer. If we have to preserve whitespace between words,
     * then perhaps readlline() can be changed to canonicalize whitespace
     * that follows a newline.
     */
    /* Walk buf word by word: skip leading separators, isolate each word by
     * NUL-terminating it in place, and emit it with a single space (or a
     * fold + indent) between words. */
    for (start = STR(buf); *(start += strspn(start, PCF_SEPARATORS)) != 0; start = next) {
        word_len = strcspn(start, PCF_SEPARATORS);
        /* Terminate the current word and remember where scanning resumes. */
        if (*(next = start + word_len) != 0)
            *next++ = 0;
        /* Separator before every word except the first on a line: either a
         * single space, or a newline plus continuation indent when folding
         * is requested and the word would exceed the line limit. */
        if (word_len > 0 && line_len > 0) {
            if ((mode & PCF_FOLD_LINE) == 0
                || line_len + word_len < PCF_LINE_LIMIT) {
                vstream_fputs(" ", fp);
                line_len += 1;
            } else {
                vstream_fputs("\n" PCF_INDENT_TEXT, fp);
                line_len = PCF_INDENT_LEN;
            }
        }
        vstream_fputs(start, fp);
        line_len += word_len;
    }
    vstream_fputs("\n", fp);
}
Differential amplifiers are circuits used to amplify a difference between two input voltages of the amplifier. An example of a differential amplifier is an operational amplifier (“op-amp”) which receives a non-inverting input (V+) and an inverting input (V−) and outputs a single-ended output (Vout). Feedback between the output and the inverting input may be used to control a gain of the circuit. An instrumentation amplifier may be a circuit which uses a main amplifier to amplify a differential signal and one or more additional amplifiers as input buffers. Instrumentation amplifiers may be used to test electronic equipment. In one example application, instrumentation amplifiers may be used to measure a resistance of connections (e.g., through silicon/substrate vias, TSVs) between semiconductor dies or chips of a semiconductor memory device. FIG. 1 is a schematic diagram of a prior art instrumentation amplifier. The instrumentation amplifier 100 includes a first amplifier 102 coupled to a second amplifier 104. The first amplifier 102 includes two operational amplifiers OP1 and OP2. The outputs of OP1 and OP2 are coupled by resistors R21, R1, and R22 which are coupled in series from the output of OP1 to the output of OP2. The inverting input of OP1 is coupled between resistors R21 and R1, and the inverting input of OP2 is coupled between R1 and R22. The non-inverting input of OP1 is coupled to an input voltage INp, while the non-inverting input of OP2 is coupled to an input voltage INn. The outputs of OP1 serves as in input IA1p to the second amplifier 104, and the output of OP2 serves as an input IA1n to the second amplifier 104. The operational amplifiers OP1 and OP2 may act as input buffers on the inputs INp and INn respectively. The second amplifier 104 includes an op-amp OP3. The input IA1p is coupled to ground via resistors R31 and variable resistor R41 coupled in series. The non-inverting input of OP3 is coupled between R31 and R41. 
The input IA1n is coupled to the output of OP3 via resistors R32 and R42, which are coupled in series. The inverting input of OP3 is coupled between R32 and R42. The resistors R21 and R22 are equal in resistance to each other. The resistors R31 and R32 are equal in resistance, and the variable resistor R41 has a default resistance equal to the resistance of R42. The first amplifier 102 receives the voltages INp and INn as inputs, and provides voltages IA1p and IA1n as outputs. The input voltages may be expressed as a common voltage Vcom (a voltage equal across the differential inputs) and a differential signal amplitude Vin. Thus, the input voltage INp may be expressed as Vcom+(½)*Vin, while the voltage at INn may be expressed as Vcom−(½)*Vin. From this, the output voltages V(IA1p) and V(IA1n) may be calculated by equations 1 and 2 below:

$$V(\mathrm{IA1p}) = V_{com} + V_{in}\left(\frac{1}{2} + \frac{R2}{R1}\right) + V_{OS1}\left(1 + \frac{R2}{R1}\right) - V_{OS2}\,\frac{R2}{R1} \qquad \text{(Eqn. 1)}$$

$$V(\mathrm{IA1n}) = V_{com} - V_{in}\left(\frac{1}{2} + \frac{R2}{R1}\right) - V_{OS1}\,\frac{R2}{R1} + V_{OS2}\left(1 + \frac{R2}{R1}\right) \qquad \text{(Eqn. 2)}$$

R2 is the value of the resistors R21 and R22, which have an equal resistance to each other. VOS1 and VOS2 are offset voltages which may exist on the input voltages INp and INn of the non-inverting inputs of the op-amps OP1 and OP2, respectively. The voltages VOS1 and VOS2, as shown in FIG. 1, may have a reversed polarity to each other. The difference Vdif between the two outputs V(IA1p) and V(IA1n) may be derived by subtracting Eqn. 2 from Eqn. 1 to yield equation 3 below:

$$V_{dif} = \left(1 + \frac{2\,R2}{R1}\right)\left(V_{in} + V_{OS1} - V_{OS2}\right) \qquad \text{(Eqn. 3)}$$

From the above equations, the amplification factor (gain) Acom1 of the common voltage Vcom and the amplification factor A1 of the differential signal Vin may be determined by equations 4 and 5 below:

$$A_{com1} = 1 \qquad \text{(Eqn. 4)}$$

$$A_1 = 1 + \frac{2\,R2}{R1} \qquad \text{(Eqn. 5)}$$

Accordingly, since the common gain Acom1 is unity, the common voltage between the differential inputs INp and INn will not be increased, while the differential voltage Vin will increase by a factor based on the value of R2 and R1. The second amplifier 104 receives the amplified outputs provided by the first amplifier 102 as an input. The common input voltage between inputs IA1p and IA1n remains Vcom, since the Acom1 of the first amplifier 102 is 1. If the difference between the offset voltages VOS1 and VOS2 is assumed to be negligible (e.g., VOS1=VOS2), then the voltage of IA1p is Vcom+(½)*Vdif, while the voltage of IA1n is Vcom−(½)*Vdif. Thus, the voltage Vout provided by the second amplifier 104 may be estimated by equation 6 below:

$$V_{OUT} \approx V_{dif}\,\frac{R4}{R3} + V_{OS3}\left(1 + \frac{R4}{R3}\right) + V_{com}\,\frac{R4\,\Delta}{R3 + R4} \qquad \text{(Eqn. 6)}$$

In equation 6, the values of resistors R31 and R32 are assumed to be equal to R3. The value of resistor R42 is equal to R4. The adjustable resistor R41 has an initial value R4 which is trimmed to cancel offset. The above equation assumes that the resistor R41 is trimmed by a percentage of its value Δ, and that Δ is much less than 1. VOS3 is the offset voltage of the third op-amp OP3. By dividing the above equations, a common amplification Acom2 and a differential amplification A2 of the second amplifier 104 can be determined to be:

$$A_{com2} = \frac{R4\,\Delta}{R3 + R4} \qquad \text{(Eqn. 7)}$$

$$A_2 = \frac{R4}{R3} \qquad \text{(Eqn. 8)}$$

From the above equations, the overall output of the instrumentation amplifier 100 may be determined by equation 9 below:

$$V_{OUT} \approx V_{in} \cdot A_1 \cdot A_2 + (V_{OS1} + V_{OS2})\,A_1 \cdot A_2 + V_{OS3}(1 + A_2) + V_{com} \cdot A_{com2} \qquad \text{(Eqn. 9)}$$

The output depends on an amplification of the signal (e.g., Vin*A1*A2) as well as offset error terms dependent on VOS1, VOS2, and VOS3. The second and third terms of equation 9 (e.g., the offset error terms) may be offset by adjusting the trimming percentage Δ to cancel the offset error in the output.
However, in order to adjust the trimming percentage, the input common mode voltage Vcom must be set to a known predetermined value. FIG. 2 shows a prior art adjustable resistor 200. The adjustable resistor 200 may, in some embodiments, be used as the adjustable resistor R41 of FIG. 1. The adjustable resistor 200 may include input node IN, output node OUT, a number of resistors R1-R10 (here labeled with their resistance values, e.g., 200 k, 300 k etc.), and switches SW0-SW7. Although specific values are shown for the resistors, it is to be understood that the adjustable resistor 200 may be configured with many different values of resistor R1-R10. Similarly, the adjustable resistor 200 may have more or less switches than the eight switches SW0-SW7 shown. The switches SW0-SW7 may be sequentially activated by a counter (not shown) to decrease the resistance of the adjustable resistor 200. In the example adjustable resistor 200 shown, the adjustable resistor 200 has a default resistance of 500 k due to the values of R1 and R2 (e.g., 200 k and 300 k) in series between the input and output nodes (IN and OUT respectively). As the switches SW0 to SW7 are activated in sequence, the value of the adjustable resistor 200 may decrease in eight stages from −3% to −24%. Although the example adjustable resistor 200 shows only a decrease in resistance from a default value, additional resistors and switches may be added in series between the input node IN and output node OUT to allow for an increase in the resistance of adjustable resistor 200. The adjustable resistor 200 may be optimized for a specific application. For example, when used to trim an instrumentation amplifier (e.g., amplifier 100 of FIG. 1), the step width (e.g., the number of steps between the maximum and minimum resistance) as well as the values of the maximum and minimum resistance may be optimized. 
The adjustable resistor 200 may be operated by a counter, which may be responsive to an output of the amplifier 100 in order to automatically determine the amount of trim needed to cancel the offset error of the amplifier 100. FIG. 3 shows a diagram indicative of operating characteristics for an instrumentation amplifier. FIG. 3 includes graph 300, which illustrates the operating characteristics of an instrumentation amplifier such as the instrumentation amplifier 100 of FIG. 1 for two different gain configurations of the instrumentation amplifier. The x-axis of the graph 300 is the output voltage of the amplifier Vout. The y-axis of the graph 300 is the common voltage between the differential inputs Vcom. The graph 300 illustrates an example scenario wherein the amplifier 100 of FIG. 1 is configured to provide a high gain, in this case a gain of 100. The dotted lines show a scenario where an overall gain of 100 is achieved by setting the gains (e.g., by selecting resistor values) of the first amplifier 102 and the second amplifier 104 to 10. The solid lines show a scenario where the overall gain of 100 is achieved by setting a gain of the first amplifier 102 to 100 and the gain of the second amplifier 104 to 1. Instrumentation amplifiers (e.g., amplifier 100) may be used to test a variety of circuit components. If a known current is used, the instrumentation amplifier may measure a voltage in order to determine a resistance of a circuit component, in order, for example, to check for manufacturing defects and/or damage to the component. The amplifier may need to have a sensitivity to small changes in resistance. While increasing a gain of the amplifier may increase small changes in the signal, it may also prevent easy cancellation of the offset and narrow a range of common voltages in which the instrumentation amplifier can stably operate. 
There remains a need for high-gain instrumentation amplifiers which can correctly cancel offset and stably operate while keeping the range of the input common mode voltage wide.
Marketing 101 (along with actual demographic research) teaches that brand loyalty tends to be established at a young age—the painfully image-conscious adolescent years, especially. This is why a number of nervier adult brands have rolled out youth extensions: Pink by Victoria’s Secret, for instance, or Teen Vogue, or U.K. brand King of Shaves’ recently introduced Kings 1965, a line of shaving products targeted at teen boys hard at work on their first whiskers. Well, get ready youth of America: Mercedes-Benz is the latest grown-up brand to want you. No, the iconic German automaker isn’t cranking out a new line of skateboards. Rather, Mercedes-Benz USA just announced the creation of a teen-driving school that will open later this year (cost and location undisclosed). The project is a pond jump from the U.K.-based Mercedes-Benz Driving Academy—also aimed at youth—that opened in 2009. Some 4,500 peach-faced motorists have taken that course, which surely beats the sleep-inducing 30-hour class we all endured during high school. Seventy-nine percent of the U.K. school grads passed their road test on the first try, which is nearly double the U.K. average of 43 percent. Of course, Mercedes’ stated aim for the U.S. teen-driving school is to turn out safe drivers, not the next Dale Earnhardt. “The skills required to simply get a license do not fully prepare young drivers to meet the demands of the road,” according to Daimler AG’s senior manager Alexander Hobbach—and who are we to doubt the man? But let’s separate out the parts here. Most teen boys in America are as likely to get a chance to test the 4.3-second, 0-to-60 acceleration of a Mercedes coupe as they are to get a date with one of the Kardashians. So once you give Junior a spin around the track with 451 horses under the hood and his hands on that leather Alcantara-grip steering wheel, tell us: When he grows up, what brand of car do you think he’ll want to buy? 
Hence, this looks like a smart move for Mercedes, which, by the way, already offers a roving driver’s class for adults called the Mercedes AMG Driving Academy at various racetracks around the country. The basic course starts at around $1,800—an amount that, last we checked, was considerably cheaper than an SLS convertible.
def clean_html_text(self, html_string: str) -> list:
    """Align the original tokens of ``html_string`` with their cleaned
    counterparts, producing a list of CleanToken/TagToken objects.

    The two token streams come from ``self.create_token_lists``; they are
    walked in lockstep (``i`` over the originals, ``clean_counter`` over the
    cleaned ones). Where the cleaned stream inserts SSML language markup,
    synthetic TagToken entries are emitted and the counters re-synchronised.

    Returns a list of tokens in original order, each indexed by its final
    position in the output list.
    """
    orig_tokens, clean_tokens = self.create_token_lists(html_string)
    clean_html_tokens = []
    clean_counter = 0  # cursor into clean_tokens, advanced independently of i
    for i in range(len(orig_tokens)):
        orig_token = orig_tokens[i]
        clean_html_token = clean_tokens[clean_counter]
        if orig_token.name == clean_html_token.name:
            # Unchanged token: wrap it, record its output position.
            # NOTE(review): clean_counter is NOT advanced on this branch,
            # unlike every other branch — confirm against create_token_lists
            # whether the cleaned stream really stays put here.
            orig_token.set_index(len(clean_html_tokens))
            clean_token = CleanToken(orig_token)
            clean_token.set_clean(clean_html_token.name)
            clean_html_tokens.append(clean_token)
        else:
            if clean_html_token.name == SSML_LANG_START:
                # Opening SSML lang tag: the following cleaned token is
                # presumably the tag's attribute text — fold both into a
                # single TagToken marking the start of the language span.
                next_clean = clean_tokens[clean_counter + 1]
                tag_str = SSML_LANG_START + ' ' + next_clean.name
                tag_tok = TagToken(tag_str, len(clean_html_tokens))
                tag_tok.ssml_start = True
                clean_html_tokens.append(tag_tok)
                clean_counter += 1
            elif clean_html_token.name.startswith(SSML_LANG_END):
                # Closing SSML lang tag: emit the end marker, then pair the
                # current original token with the cleaned token that follows
                # the tag.
                tag_tok = TagToken(SSML_LANG_END, len(clean_html_tokens))
                tag_tok.ssml_end = True
                clean_html_tokens.append(tag_tok)
                orig_token.set_index(len(clean_html_tokens))
                clean_token = CleanToken(orig_token)
                clean_token.set_clean(clean_tokens[clean_counter + 1].name)
                clean_html_tokens.append(clean_token)
                clean_counter += 1
            else:
                # Token whose cleaned text differs from the original.
                orig_token.set_index(len(clean_html_tokens))
                clean_token = CleanToken(orig_token)
                clean_token.set_clean(clean_html_token.name)
                clean_html_tokens.append(clean_token)
                clean_counter += 1
    return clean_html_tokens
Evaluation of rat liver with ARFI elastography: In vivo and ex vivo study Objective The aim of this study was to compare in vivo vs ex vivo liver stiffness in rats with acoustic radiation force impulse (ARFI) elastography using the histological findings as the gold standard. Methods Eighteen male Wistar rats aged 16–18 months were divided into a control group (n = 6) and obese group (n = 12). Liver stiffness was measured with shear wave velocity (SWV) using the ARFI technique both in vivo and ex vivo. The degree of fibrosis, steatosis and liver inflammation was evaluated in the histological findings. Pearson's correlation coefficient was applied to relate the SWV values to the histological parameters. Results The SWV values acquired in the ex vivo study were significantly lower than those obtained in vivo (P < 0.004). A significantly higher correlation value between the degree of liver fibrosis and the ARFI elastography assessment was observed in the ex vivo study (r = 0.706, P < 0.002), than the in vivo study (r = 0.623, P < 0.05). Conclusion Assessment of liver stiffness using ARFI elastography yielded a significant correlation between SWV and liver fibrosis in both the in vivo and ex vivo experiments. We consider that by minimising the influence of possible sources of artefact we could improve the accuracy of the measurements acquired with ARFI. Introduction Non-alcoholic fatty liver disease (NAFLD) is one of the most common causes of chronic hepatopathy in adults, with a prevalence of up to 20-30% in developed countries. This entity represents a wide spectrum of pathologies ranging from a simple steatosis (80-90% of cases) to steatohepatitis (10-20%). Moreover, in the absence of diagnosis and early treatment it can lead to a progressive liver fibrosis and subsequent liver cirrhosis. Today's gold standard for assessing liver involvement is still biopsy.
However, it does have considerable limitations and complications, some of which, though infrequent, are potentially fatal. This makes it necessary to find a non-invasive diagnostic method to enable an accurate and reproducible assessment of this pathology. The acoustic radiation force impulse (ARFI) elastography technique, capable of evaluating stiffness of the liver parenchyma through short-duration high-intensity acoustic pulses, has yielded promising results, which makes it an important diagnostic alternative for NAFLD assessment, even with normal laboratory values. Various studies have focused on the assessment of liver fibrosis and the possible degree of influence that hepatic steatosis and associated inflammatory processes may have on the final result. However, a greater number of experiments are needed to focus on external factors that may influence the acquisition of measurements. Bruno et al claim there are physical, geometrical, anatomical and physiological factors that influence shear wave velocity (SWV) measured with ARFI. Characteristics such as movement during the acquisition of measurements, the depth of the region of interest (ROI), the ultrasound frequency used, the extrinsic compression exerted by the transducer, the orientation of the ROI with regard to the surface of the target organ and physiological factors such as heartbeat, respiratory movements or fasting, might alter the assessment of liver stiffness. Our hypothesis is that these factors could considerably alter the accuracy of ARFI velocities. The aim of this study was to compare in vivo and ex vivo liver stiffness in rats with the ARFI technique using the histological findings as the gold standard. Animal study A total of 18 Wistar male rats aged 16-18 months were maintained under constant cycles of light-dark (12:12 hours) and temperature (25C) in the Murcia University animal-housing unit. They were divided into two groups: control group (n = 6) and obese group (n = 12). 
One of the animals died during the in vivo experiment and was excluded from the study. All the rats were fed a diet made up of 61% carbohydrates, 15% fats and 24% proteins. Subsequently, the obese group was administered a diet with a high fat content (Harlan TD.06414) during the 6 weeks prior to the study, in which 60.3% of the kcals came from fat (37% saturated; 47% monounsaturated; 16% polyunsaturated) and the rest from carbohydrates (21.3%) and proteins (18.4%). Before ultrasound study, animals were heavily sedated with an intraperitoneal injection of 0.1 ml/100 gr sodium pentobarbital in order to perform an in vivo ultrasound evaluation. The same drug caused the animals death 10-15 minutes after the injection and the ex vivo ultrasound study and blood and liver extraction were performed. Ethics statement The experimental protocols were designed according to the "Guiding principles for research involving animals and humans" adopted by the American Physiology Society and the European Union standards and received approval from the University of Murcia's "Institutional Animal Care and Use Committee" (CEEA) in process number A1320140709. Laboratory tests A blood extraction (5 ml) was taken by cardiac puncture. The blood was anticoagulated with 0.1 ml of EDTA and kept cold until centrifugation (3000 rpm for 15 minutes). The plasma samples were stored in Eppendorf Tubes at -80C until the measurements were taken. A complete lipid profile was obtained, with calculation of HDL (mg/dl), LDL (mg/dl) and triglyceride levels (mg/dl) using an ELISA Kit from "Shanghai Yehua Biological Technology" (Shanghai, China). We also calculated the levels of alanine aminotransferase (ALT) (U/ml) using a kit supplied by Cayman Chemical (ref. 700260) for laboratory assessment of liver damage. 
Protocol for the ARFI technique All the ultrasound studies were performed by a radiologist with more than 20 years' experience in conventional ultrasonography and more than 10 years with the ARFI technique, who was blind to the laboratory and histology results. The ARFI values were obtained with an Acuson S2000 system (Siemens, Erlangen, Germany) using the Virtual Touch Tissue Quantification software, which is capable of generating and detecting the transverse or shearing waves used to determine ARFI values, represented as SWV measured in metres per second. A 9L4 transducer was used, with a 700 cycle push transmit event at either 4,44 or 5,71MHz, depending on the region of interest (ROI) depth, and a shear wave bandwidth of 123-158Hz. Fig 1 shows both the conventional ultrasound study and the ARFI elastography measurements in the liver parenchyma, which were taken both in vivo, with the animals anaesthetised (Fig 1A), and ex vivo, after they were euthanised, with the liver explant placed in a plastic container with physiological serum for an immersion ultrasound study to be conducted ( Fig 1B). To determine the ARFI values we established three ROIs in the animals' right liver lobes (medial and lateral) in both the in vivo and ex vivo study. Five measurements were taken in each selected ROI. These regions had been studied previously by conventional ultrasound. The ROIs were situated at a depth of 1-2cm in the liver parenchyma, away from the capsular region and major intrahepatic vascular structures and including the largest possible amount of liver parenchyma; a minimum extrinsic compression was exerted with the transducer on the abdominal wall in the in vivo study with application of physiological serum as a connector; the ROIs were arranged parallel to the capsular surface and five measurements were obtained for Evaluation of rat liver with ARFI elastography: In vivo and ex vivo study each ROI to avoid error variability. 
The ARFI result in each ROI, expressed as SWV measured in metres/second, was expressed as the mean and standard deviation of the five measurements obtained for each one. In vivo ARFI For the in vivo ultrasound study the animals' abdominal fur was removed completely using a depilatory cream to avoid the formation of artefacts. The animals were then heavily sedated to induce a drop in respiratory and heart rate in order to reduce the artefacts caused by the heartbeat and movements in the thoracic cavity. Immediately after injection of the drug the animals were placed and secured in a decubitus position to enable adequate ultrasound exploration. For the B-mode ultrasound study of the liver parenchyma we explored the upper abdominal quadrants, focusing on the subcostal region, until we located the two right liver lobes. During the conventional study we explored the right liver lobe thoroughly and established the ROIs that would subsequently be used for recording the SWVs, as shown in Ex vivo ARFI Once the animals had been euthanised their liver parenchyma was removed for the ex vivo ultrasound study. The explants were placed in a jar with physiological serum before the study. We then introduced the liver explant into a plastic container with physiological serum at room temperature to conduct an immersion ultrasound study. As in the in vivo study, the two right liver lobes were located to establish the ROIs, as shown in Histology The liver samples were placed in histology cassettes (Labolan, Navarra, Spain) and submerged in 4% commercial formalin buffered in PBS (pH 7.0, Panreac Qumica, Barcelona, Spain) for subsequent processing using a multifunctional microwave histoprocessor (Milestone KOS Histostation, Milestone, Bergamo, Italy) and inclusion in paraffin. The histological analysis was conducted on the liver samples with haematoxylin-eosin (HE) and Masson trichrome (TRIC) staining for assessment of steatosis, inflammatory processes and liver fibrosis. 
The inflammatory infiltrate (CD3+ T lymphocytes) was also determined by immunohistochemical determination on liver sections in paraffin using an indirect colorimetric technique based on the avidin-biotin-peroxidase complex (ABC technique). The fibrosis surface area was calculated quantitatively and semi-quantitatively. The quantitative assessment was done by histomorphometric analysis using a specific software package (AxioVision Rel. 4.8, Zeiss); the connective tissue surface was calculated in 10 random fields from the liver sections of each animal and the result expressed as mean standard deviation of the fibrosis surface of the 10 fields. In addition, the semi-quantitative METAVIR scale was used to assess the liver fibrosis. This scale is divided into five stages: F0, no fibrosis; F1, perisinusoidal or periportal fibrosis; F2, perisinusoidal and periportal fibrosis; F3, fibrous bridges; F4, cirrhosis. Positive immunoreaction (CD3+ T lymphocytes) was identified as a dark brown pericellular halo. To establish possible differences in CD3+ T lymphocyte infiltration depending on the technique, we calculated the weighted mean ± standard deviation of the positive cell count in a minimum of 10 random fields at high magnification (x400) of each animal's liver sections. To determine the grade of steatosis we used the scale defined by Brunt et al based on the percentage of cellular lipid overload: grade E0, no steatosis; grade E1, 0-33%; grade E2, 33-66%; grade E3, >66%. The histological and immunohistochemical studies were performed by a veterinary anatomical pathologist with 7 years' experience. Statistical analysis The statistical analysis was carried out using the SPSS software package version 15.0 (SPSS for Windows, Chicago, IL, USA). The Kolmogorov-Smirnov test was used to verify that the quantitative variables followed a normal distribution. In this way, parametric statistical tests were applied due to their greater statistical power. 
The Student t test was applied to determine differences between the intergroupal means of the quantitative variables (surface area of liver fibrosis per field, number of hepatic CD3 lymphocytes per field and SWVs obtained with ARFI in the liver parenchyma) and to establish differences between quantitative variables and dichotomous variables (Brunt classification ). To analyse the degree of correlation between the SWVs and the quantitative histological variables (surface area of liver fibrosis per field, number of CD3 lymphocytes in the liver parenchyma per field and percentage of steatosis) we applied Pearson's correlation coefficient. Statistical significance was defined as P < 0.05. Laboratory parameters The HDL, LDL and triglyceride values showed no significant differences between the animals in the control group and obese group. Likewise, the ALT values showed no significant differences between the two groups. ARFI determinations and correlation analysis The SWVs obtained in the liver parenchymas of the control group gave an in vivo mean of 1 ± 0.2 m/s whereas the obese group showed a higher mean in the in vivo assessment (1.4 ± 0.1 m/s), with a statistically significant difference (P < 0.008). As for the ex vivo explorations the SWVs obtained were 0.7 ± 0.1 m/s in the control group and a significantly higher mean of 1.1 ± 0.2 m/s in the animals from the obese group (P < 0.001). With regard to the correlation for the quantitative histological liver variables, the whole study sample showed a positive and statistically significant correlation between the mean SWV obtained by ARFI and the surface area of liver fibrosis per field, in both the in vivo study (r = 0.623, P < 0.008) and ex vivo study (r = 0.706, P < 0.002), the ex vivo value being higher than the in vivo value. 
The correlation between the SWV with ARFI and the number of CD3+ T lymphocytes was positive and statistically significant in the in vivo study (r = 0.670, P < 0.003), but not statistically significant in the ex vivo study (r = 0.471, P < 0.056). If we observe the degree of correlation between the SWV with ARFI and the grade of liver steatosis we see a positive but not statistically significant correlation in the ex vivo study (r = 0.530, P = 0.029) and an absence of correlation in the in vivo study (r = 0.120, P < 0.647). Fig 5 shows the histological findings. Fig 6 shows that the mean surface area of liver fibrosis per field in the control group animals was 5423 ± 893 m 2, a result in contrast to the mean of 16006 ± 1818 m 2 obtained in the obese group, with statistically significant differences (P < 0.001). According to the METAVIR score the control group animals had no significant fibrosis (F0) whereas the obese group animals developed a mild fibrosis (F1). Fig 7 shows the degree of CD3+ T lymphocyte infiltration: the control group had a count of 8.1 ± 0.6 lymphocytes/field whereas the obese group showed a clearly higher value (22.4 ± 5.9 lymphocytes/ field) (P < 0.001). Histological findings Analysis of the degree of liver steatosis revealed that the control group animals did not show significant steatosis except one of the rats with a moderate lipid overload (E0 = 5; E2 = 1), whereas the obese group animals presented a significant increase in steatosis (P < 0.002), mostly with a medium or severe degree of fat overload (E1 = 2; E2 = 6; E3 = 3) (Fig 8). 
Discussion The present study is a comparative analysis between liver parenchyma assessment using the ARFI technique in in vivo experiments, with the animals placed in the supine position and under heavy sedation to avoid any possible cardiac or respiratory artefacts, and in ex vivo experiments, after euthanisation of the animals and with the liver explants introduced into a container with physiological serum to minimise possible sources of artefact, such as those related to physiological factors (breathing, heartbeat), and improve the conditions in which we took the measurements. Several factors have been described as capable of distorting SWVs acquired with ARFI. According to publications by Bruno et al when performing explorations with the ARFI technique the radiologist must take into consideration all these physical, geometrical, anatomical and physiological factors that can alter the measurements and lead to diagnostic errors of interpretation and possible wrong treatment. One of the most relevant is the depth at which the measurements are established. This factor particularly influences the study of voluminous organs like the liver. Independent studies conducted by D'Onofrio, Gallotti & Mucelli and Kamimuma et al show significant differences according to the depth at which the ROI is established in the liver parenchyma and observe that the deeper the ROI in the parenchyma the lower the SWVs. Furthermore, the study by Chang, Kim, Kim & Lee reports a considerable increase in the variability of measurements according to the depth of the ROI; they establish an ideal ROI depth of 2-3cm below the liver capsule using high-frequency probes, and 4-5cm when using low-frequency probes, in order to minimise dispersion in the results. Evaluation of rat liver with ARFI elastography: In vivo and ex vivo study Due to the small liver size in our study we selected a ROI including just the liver parenchyma and avoiding the liver capsule and large intrahepatic structures. 
Other factors reported in the literature, as the ultrasound frequencies used to determine the SWVs or the frequency bandwidth of generated shear waves, are controversial. The study developed by Chang et al claims that SWVs acquired with a low frequency probe had a tendency to be higher at the same depth, while Dillman et al found no significant differences between low and high frequency probes. Another study published by Kazemirad et al assessed liver shear stiffness using US elastography at low (40-130Hz) and high (130-220Hz) frequencies, obtaining better distinction of steatohepatitis categories at high frequencies. Throughout our experiment we used a 9L4 probe at either 4,44 or 5,71MHz, depending on the ROI depth, at high frequencies (123-158 Hz) to determine the SWVs. Regarding the influence of the extrinsic compression of the tissue with the transducer, there are several studies such as that by Syversveen et al which show significant changes in measurement depending on the degree of compression exerted, in this case in studies on the kidney parenchyma. In the in vivo model we exert minimum compression on the animal's abdominal wall to achieve a good exploratory window. In the ex vivo study we minimise this factor because the good transmission of the ultrasound beam through the serum used for the liver study in our experimental model enables us to avoid excessive compression for the collection of data. The arrangement of the ROIs parallel to the liver capsule has also proved relevant in determining ARFI values. Authors such as Chang et al emphasise this factor as a way of avoiding variability between measurements because the ultrasound beams interact with a larger number of interfaces and collect a greater amount of information. The possibilities of manipulating the position of the organs in the ex vivo studies made it easier to correctly align the ROIs. 
The influence of physiological factors, like heartbeat, respiratory movements or fasting, has also been reported on the accuracy of SWV measurements. The study published by Kamimuma et al, which analyses the liver parenchyma using ARFI, claims that the acquisition of measurements during deep breathing or following the ingestion of food does not distort the measurements, whereas other publications such as the study by Mederacke et al report significant differences between the fasting and post-ingestion states. In our ex vivo model we limited all the potential sources of physiological artefacts that might affect liver stiffness measurements. After conducting all the explorations in the two measurement models we observed that the mean SWV obtained in the liver parenchyma in the in vivo studies was significantly higher than that acquired in the ex vivo studies. Inclusion of the liver explants in a homogeneous medium, without artefacts caused by physiological processes and with the possibility of correctly aligning the organs to establish the ROIs in an optimal way, appears to reduce the SWVs in the ARFI assessment. Minimising the number of factors that can alter the measurements would make these measurements more accurate and closer to real parenchymal elasticity. These findings suggest that in addition to physiological artefacts and those related to measuring correctly, the abdominal cavity and adjacent structures might be factors that alter measurements. This might be interesting when assessing grades of mild and moderate fibrosis, where the ARFI technique shows considerable limitations due to the major overlapping of results. The ARFI technique has yielded promising results to establish itself as an important noninvasive diagnostic alternative for assessing NAFLD, even with normal laboratory values. 
With our Wistar rat model we found no significant differences between the control group and obese group for the results of the lipid profile or ALT levels, but we did obtain differences between the control animals and obese animals when using ARFI, which shows an important correlation with histology. This is especially relevant, as the ARFI technique could give us early identification of histological alterations, particularly in less advanced stages of NAFLD, where the laboratory parameters are normal. Various studies, such as that by Guzmn-Aroca et al, have obtained similar results in patients who, without presenting significant laboratory abnormalities, did show histopathological alterations in the liver. We cannot therefore exclude a diagnosis of NAFLD and histological alterations in patients who do not present laboratory abnormalities. In these patients the use of ARFI elastography may be differential when establishing suitable management to prevent the disease from evolving. Moreover, ARFI has shown very promising results for detecting and grading liver fibrosis in different stages ; this is the histological feature best studied by elastography and an important prognostic factor in the development of NAFLD. Our study revealed an important correlation between SWV and the grade of liver fibrosis according to the affected surface, in both the in vivo and ex vivo studies. This shows that by minimising possible sources of artefact for the detection and quantification of SWVs we obtain more reliable results for characterising liver fibrosis. Another of the aims of the ARFI technique is based on detecting liver fibrosis at early stages of the disease, when the fibrogenic process is in its initial stages, in order to establish means to prevent it from progressing. Some studies, such as that by Bota et al, claim that ARFI is capable of diagnosing F�2 liver fibrosis stages with a sensitivity of 74% and specificity of 83%, both of them rising to 87% when identifying F4. 
These results highlight the capacity of the ARFI technique for differentiating moderate and advanced stages of fibrosis from stages with non-significant or mild fibrosis. Another study, by Sporea et al, shows an important superposition of results in patients without fibrosis (F0) and patients with mild (F1) or moderate (F2) fibrosis. Although the grade of fibrosis developed by the animals in our study was quite low, we found significant differences between those that showed no significant liver fibrosis (F0 on the METAVIR scale) and those presenting with some degree of fibrosis (F�1), in both the in vivo and ex vivo studies. These results indicate that by adjusting the parameters correctly and optimising SWV measurement the ARFI technique might be a fundamental tool for differentiating healthy patients from those with an early stage of liver involvement. As for the capacity of ARFI to characterise hepatic steatosis and necroinflammatory processes, histological features that are also present in NAFLD and chronic hepatopathy of various origins, there is a greater degree of controversy in the literature. Fierbinteanu-Braticevici et al revealed in their research that the ARFI values presented an important negative correlation with the grade of hepatic steatosis, with a progressive reduction in SWVs as the grade of steatosis increased. However, another study by Nishikawa et al found no significant correlation between the SWVs acquired by ARFI and the degree of steatosis. We found no significant correlation in our animal model between the degree of lipid overload and the SWVs, in either the in vivo or ex vivo study, but we did detect significant differences between animals without significant lipid overload on the Brunt scale (E0) and animals with some degree of steatosis (E � 1) in both the in vivo and ex vivo studies. 
Our results suggest therefore that the ARFI technique could be useful for differentiating the absence of lipid overload from the other grades of hepatic steatosis, from mild to severe following a proper optimisation of SWV acquisition. As occurred in the case of steatosis, there is a certain degree of controversy in the assessment of necroinflammatory processes. The studies conducted by Fierbinteanu Batricevici et al. showed a certain degree of positive correlation with the degree of inflammation. Conversely, Yoneda et al reported SWV differences between groups with different degrees of inflammation but did not identify a gradual change in SWVs between the different degrees of inflammation. We only found a significant positive correlation in our experiment between the SWV values determined in vivo and the number of hepatic CD3 T lymphocytes obtained in the histological study; however, we were unable to detect a correlation in the ex vivo study. Further studies with the ARFI technique are necessary to see if it is useful in detecting and grading steatosis and liver inflammation in patients with NAFLD. An improved selection of ARFI parameters and ROI location, as in the case of fibrosis, might increase the accuracy of the technique in assessing fatty liver overload and degree of inflammation. Limitations and future research In our ex vivo study we cannot exclude the possible influence of physiological serum on the liver explant when assessing liver stiffness, nor can we rule out that the measurements are altered by the absence of hydrostatic blood pressure. It would be interesting in future research to obtain a model in which the acquisition of measurements depends solely on liver stiffness. We could therefore calculate a correction factor to try to adjust the values obtained in vivo. Moreover, it was not easy to perform the ARFI examination due to the small liver size and the fixed dimensions of the ROI. However, the SWV measurements were taken adequately. 
It is also worth noting that to avoid interobserver variability the ARFI technique was performed by an examiner with experience. However, there is shown to be an excellent interobserver agreement with the ARFI technique. Conclusions This is the first experimental study to determine SWVs in the liver with the ARFI technique both in vivo and ex vivo. The SWVs are seen to be higher in vivo than those measured ex vivo. This suggests that various factors may alter the liver stiffness measurements. Furthermore, we detected some very high correlation values between SWV and the degree of liver fibrosis in both the in vivo and ex vivo experiments. In addition, a greater degree of correlation was identified in the ex vivo study, which confirms that by planning and optimising the determinations with ARFI to limit sources of artefact we can obtain more reliable, more accurate and more reproducible results. We consider that further studies are needed to clarify the possible role of ARFI in this issue. Supporting information S1 Table. Animal group and quantitative and semiquantitative variables. (XLSX)
/**
 * Adds an outbound port definition to the set of outbound port
 * definitions, creating the backing collection lazily on first use.
 *
 * @param portDef outbound port definition to register
 * @return this {@code ClientNetworkConfig} instance, for call chaining
 */
public ClientNetworkConfig addOutboundPortDefinition(String portDef) {
    if (this.outboundPortDefinitions == null) {
        // Allocate lazily; a HashSet silently ignores duplicate definitions.
        this.outboundPortDefinitions = new HashSet<String>();
    }
    this.outboundPortDefinitions.add(portDef);
    return this;
}
MOLECULAR AND GENETIC CHARACTERIZATION OF SOME Rhizobium leguminosarum bv. vicieae isolates. Rhizobium leguminosarum bv. vicieae isolates which isolated and fully characterized in Agric. Microbiol. Dept., Agric. Fac., Zagazig Univ. were used in this work to study molecular and genetic diverisity.24 isolates were used to investigate lysogenicity ability. The results showed that 23 isolates from 24 were lysogen with lysogenecity percent 96%. 19 lysogenic isolates were contained more than one different prophage, since the phage released from them was able to lysis the same lysogenic isolate which released from it. The isolates RA21, RF12, RF13 and RR13 were contained one prophage only, since the phage released was not able to lysis the same lysogenic host. Most bacterial isolates were sensitive to streptomycin, ampicillin and chloramphenicol at concentrations from 100 to 2000 g/ ml.The RR23 isolate was resistance to ampicillin and chloramphenicol at the used concentrations. The RA11 and RR11 isolates were resistanc to ampicillin up to 1500 g/ ml and chloramphenicol up to 500 g/ ml. The RK11 and RF12 isolates were resistance to streptomycin up to 500 g/ ml. Five rhizobiophages were used in this study. These phages were isolated from soil. The host range of these phages was studied by using the bacterial isolates as hosts. The phages A32, R11 and H21 were able to lysis all the used hosts. The phage K23 was lysis 10 from 11 host, while phage F13 was lysis 7 from 10 host. The plaque forming units (pfu/ ml) of these phages were varied, it ranged from 2.2 106 to 9.67 1013. The ability of these phages to transduce some antibiotic resistance genes was assessed. The five phages were able to successfully transduce streptomycin and chloramphenicol resistance genes. Transduction frequency ranged from 0.39 10-8 to 4.5 10-5 for streptomycin and from 1.25 10-8 to 1.3 10-3 for chloramphenicol. Not all phages were able to transduce the ampicillin resistance gene. 
Also, transducing this marker was not successful with all the recipients. Transduction frequency ranged from 4.3 × 10⁻⁹ to 9.4 × 10⁻⁸. The five phages were able to cotransduce the streptomycin and chloramphenicol resistance genes together; transduction frequency ranged from 5.0 × 10⁻⁵ to 2.97 × 10⁻³.
Arian Foster Baby Mama I'm a HUUUGE Gold Digger Arian Foster Baby Mama -- I'm a HUUUGE Gold Digger EXCLUSIVE The woman carrying's fetus is a gold digger ... and to prove it, she DRESSED UP LIKE ONE FOR HALLOWEEN ... andhas the photo.Sources tell usdonned the outfit for Halloween 2012 -- she posted the photo to her Instagram page along with the caption, "Now I ain't saying she a golddigger ... no wait yes she is."Of course, the pic is interesting since Arian filed legal docs painting Brittany as a shameless fame whore who's trying to exploit the pregnancy to get a reality TV show.And there's this ... Arian isn't the first pro athlete Brittany's been linked to. We know she was tight with NBA star Chase Budinger, who played for the Houston Rockets at the time.In fact, she visited Chase in San Diego back in 2012. Unclear if they're still friendly.She ain't messin' with no broke ...
package com.manning.hip.ch3;

import org.apache.hadoop.io.ArrayWritable;
import org.apache.hadoop.io.Text;

/**
 * An {@link ArrayWritable} specialized for {@link Text} elements, so Hadoop
 * can serialize/deserialize arrays of strings as a single writable value.
 */
public class TextArrayWritable extends ArrayWritable {
  /** No-arg constructor required by Hadoop's reflection-based deserialization. */
  public TextArrayWritable() {
    super(Text.class);
  }

  /** Wraps the supplied {@link Text} values in a writable array. */
  public TextArrayWritable(Text[] strings) {
    super(Text.class, strings);
  }
}
<gh_stars>0
#ifndef __LISTA_CODIGO__
#define __LISTA_CODIGO__

/* Selector for one of the four fields of an Operacion. */
typedef enum {
    OPERACION,
    ARGUMENTO1,
    ARGUMENTO2,
    RESULTADO
} Campo;

/* A MIPS operation has, in the most complex case:
   - op: operation code, such as "add", "mv", etc.
   - arg1: argument 1 of the operation, such as "$t0", etc.
   - arg2: argument 2 of the operation, such as "$t1", etc.
   - res: result of the operation, such as "$t3", etc.
   If a field is unused, it is marked with NULL. */
typedef struct {
    char * op;
    char * res;
    char * arg1;
    char * arg2;
} Operacion;

/* ListaC is a linked list of code, containing instances of Operacion.
   Both representation types are opaque (defined in the implementation). */
typedef struct ListaCRep * ListaC;
typedef struct PosicionListaCRep *PosicionListaC;

/* Creates a code list. */
ListaC creaLC();

/* Destroys a code list. */
void liberaLC(ListaC codigo);

/* Inserts a new operation into the code list, at the indicated position. */
void insertaLC(ListaC codigo, PosicionListaC p, Operacion o);

/* Retrieves the operation stored at the indicated position. */
Operacion recuperaLC(ListaC codigo, PosicionListaC p);

/* Searches the list, starting from a given position, for an operation
   whose given field holds the given value. */
PosicionListaC buscaLC(ListaC codigo, PosicionListaC p, char *clave, Campo campo);

/* Assigns an operation to a given position of the list. */
void asignaLC(ListaC codigo, PosicionListaC p, Operacion o);

/* Concatenates two code lists.
   The first list is modified to form the result. */
void concatenaLC(ListaC codigo1, ListaC codigo2);

/* Length of a code list. */
int longitudLC(ListaC codigo);

/* Starting position of a code list. */
PosicionListaC inicioLC(ListaC codigo);

/* Final position of a code list. */
PosicionListaC finalLC(ListaC codigo);

/* Position following a given one in a code list. */
PosicionListaC siguienteLC(ListaC codigo, PosicionListaC p);

/* Stores the result register of a code list. */
void guardaResLC(ListaC codigo, char *res);

/* Retrieves the result register of a code list. */
char * recuperaResLC(ListaC codigo);

#endif
<reponame>deepld/yugabyte-db<gh_stars>1-10
// Copyright (c) YugaByte, Inc.

package controllers;

import play.api.mvc.AnyContent;
import play.mvc.Controller;
import play.mvc.Result;

import javax.inject.Inject;

public class UIController extends Controller {
  @Inject Assets assets;

  /** Serves the single-page application's entry point. */
  public play.api.mvc.Action<AnyContent> index() {
    return assets.at("/public", "index.html", false);
  }

  /**
   * Routes static assets through the asset pipeline; every other resource
   * falls back to the SPA's index page.
   */
  public play.api.mvc.Action<AnyContent> assetOrDefault(String resource) {
    return isStaticAsset(resource)
        ? assets.at("/public", resource, false)
        : index();
  }

  // A resource is static when it lives under "static" or names a stylesheet/icon.
  private boolean isStaticAsset(String resource) {
    return resource.startsWith("static")
        || resource.contains(".css")
        || resource.contains(".ico");
  }

  // UI Controller wouldn't serve the API calls.
  public Result unknown(String resource) {
    return notFound(String.format("%s not found", resource));
  }
}
Calculation of long jump performance by numerical integration of the equation of motion. The aerial phase of the long jump is calculated by numerical integration of the equations of motion. Consideration is given to the effects on performance of the horizontal and vertical components of velocity at takeoff, aerodynamic drag, wind assistance and the vertical displacement of the center of mass which occurs during the course of the jump. For still air conditions it is shown that an analytical solution due to Lamb compares very favorably with the numerical solution, providing an excellent description of the trajectory. Calculations neglecting the effect of aerodynamic drag are shown to overestimate the jump distance of world-class athletes by from 9 to 11 cm under still air conditions.
package db

import (
	"fmt"
	"strconv"
	"time"

	"github.com/pkg/errors"

	"github.com/spoke-d/thermionic/internal/version"
)

// Nodes returns all nodes part of the cluster.
//
// If this instance is not clustered, a list with a single node whose
// address is 0.0.0.0 is returned.
func (c *ClusterTx) Nodes() ([]NodeInfo, error) {
	// Non-pending nodes only, with no extra filter clause.
	return c.nodes(false, "")
}

// NodeAddress returns the address of the node this method is invoked on.
func (c *ClusterTx) NodeAddress() (string, error) {
	stmt := "SELECT address FROM nodes WHERE id=?"
	addresses, err := c.query.SelectStrings(c.tx, stmt, c.nodeID)
	if err != nil {
		return "", errors.WithStack(err)
	}
	switch len(addresses) {
	case 0:
		// No row for our own ID: report an empty address rather than an error.
		return "", nil
	case 1:
		return addresses[0], nil
	default:
		// The id column should be unique; more than one row is a data bug.
		return "", errors.Errorf("inconsistency: non-unique node ID")
	}
}

// NodeHeartbeat updates the heartbeat column of the node with the given address.
func (c *ClusterTx) NodeHeartbeat(address string, heartbeat time.Time) error {
	stmt := "UPDATE nodes SET heartbeat=? WHERE address=?"
	result, err := c.tx.Exec(stmt, heartbeat, address)
	if err != nil {
		return errors.WithStack(err)
	}
	n, err := result.RowsAffected()
	if err != nil {
		return errors.WithStack(err)
	}
	// Exactly one node row should match the address.
	if n != 1 {
		return errors.Errorf("expected to update one row and not %d", n)
	}
	return nil
}

// NodeAdd adds a node to the current list of nodes that are part of the
// cluster. It returns the ID of the newly inserted row.
func (c *ClusterTx) NodeAdd(name, address string, schema, api int) (int64, error) {
	columns := []string{
		"name",
		"address",
		"schema",
		"api_extensions",
	}
	values := []interface{}{
		name,
		address,
		schema,
		api,
	}
	// UpsertObject inserts the row and returns its ID.
	return c.query.UpsertObject(c.tx, "nodes", columns, values)
}

// NodePending toggles the pending flag for the node. A node is pending when
// it's been accepted in the cluster, but has not yet actually joined it.
func (c *ClusterTx) NodePending(id int64, pending bool) error {
	// The pending flag is stored as an integer 0/1 column.
	value := 0
	if pending {
		value = 1
	}
	result, err := c.tx.Exec("UPDATE nodes SET pending=? WHERE id=?", value, id)
	if err != nil {
		return errors.WithStack(err)
	}
	n, err := result.RowsAffected()
	if err != nil {
		return errors.WithStack(err)
	}
	if n != 1 {
		return errors.Errorf("query updated %d rows instead of 1", n)
	}
	return nil
}

// NodeUpdate updates the name and address of a node.
func (c *ClusterTx) NodeUpdate(id int64, name, address string) error {
	result, err := c.tx.Exec("UPDATE nodes SET name=?, address=? WHERE id=?", name, address, id)
	if err != nil {
		return errors.WithStack(err)
	}
	n, err := result.RowsAffected()
	if err != nil {
		return errors.WithStack(err)
	}
	if n != 1 {
		return errors.Errorf("query updated %d rows instead of 1", n)
	}
	return nil
}

// NodeRemove removes the node with the given id.
func (c *ClusterTx) NodeRemove(id int64) error {
	result, err := c.tx.Exec("DELETE FROM nodes WHERE id=?", id)
	if err != nil {
		return errors.WithStack(err)
	}
	n, err := result.RowsAffected()
	if err != nil {
		return errors.WithStack(err)
	}
	if n != 1 {
		return errors.Errorf("query deleted %d rows instead of 1", n)
	}
	return nil
}

// NodeByName returns the node with the given name.
func (c *ClusterTx) NodeByName(name string) (NodeInfo, error) {
	nodes, err := c.nodes(false, "name=?", name)
	if err != nil {
		return NodeInfo{}, err
	}
	switch len(nodes) {
	case 0:
		return NodeInfo{}, ErrNoSuchObject
	case 1:
		return nodes[0], nil
	default:
		return NodeInfo{}, errors.Errorf("more than one node matches")
	}
}

// NodeByAddress returns the (non-pending) node with the given network address.
func (c *ClusterTx) NodeByAddress(address string) (NodeInfo, error) {
	nodes, err := c.nodes(false, "address=?", address)
	if err != nil {
		return NodeInfo{}, err
	}
	switch len(nodes) {
	case 0:
		return NodeInfo{}, ErrNoSuchObject
	case 1:
		return nodes[0], nil
	default:
		return NodeInfo{}, errors.Errorf("more than one node matches")
	}
}

// NodePendingByAddress returns the pending node with the given network address.
func (c *ClusterTx) NodePendingByAddress(address string) (NodeInfo, error) {
	// Same lookup as NodeByAddress, but restricted to pending nodes.
	nodes, err := c.nodes(true, "address=?", address)
	if err != nil {
		return NodeInfo{}, err
	}
	switch len(nodes) {
	case 0:
		return NodeInfo{}, ErrNoSuchObject
	case 1:
		return nodes[0], nil
	default:
		return NodeInfo{}, errors.Errorf("more than one node matches")
	}
}

// NodeName returns the name of the node this method is invoked on.
func (c *ClusterTx) NodeName() (string, error) {
	stmt := "SELECT name FROM nodes WHERE id=?"
	names, err := c.query.SelectStrings(c.tx, stmt, c.nodeID)
	if err != nil {
		return "", errors.WithStack(err)
	}
	switch len(names) {
	case 0:
		// No row for our own ID: report an empty name rather than an error.
		return "", nil
	case 1:
		return names[0], nil
	default:
		return "", errors.Errorf("inconsistency: non-unique node ID")
	}
}

// NodeRename changes the name of an existing node.
//
// Return an error if a node with the same name already exists.
func (c *ClusterTx) NodeRename(old, new string) error {
	// Refuse to rename onto a name that is already taken.
	count, err := c.query.Count(c.tx, "nodes", "name=?", new)
	if err != nil {
		return errors.Wrap(err, "failed to check existing nodes")
	}
	if count != 0 {
		return ErrAlreadyDefined
	}
	stmt := `UPDATE nodes SET name=? WHERE name=?`
	result, err := c.tx.Exec(stmt, new, old)
	if err != nil {
		return errors.Wrap(err, "failed to update node name")
	}
	n, err := result.RowsAffected()
	if err != nil {
		return errors.Wrap(err, "failed to get rows count")
	}
	if n != 1 {
		return errors.Errorf("expected to update one row, not %d", n)
	}
	return nil
}

// NodesCount returns the number of nodes in the cluster.
//
// Since there's always at least one node row, even when not-clustered, the
// return value is greater than zero
func (c *ClusterTx) NodesCount() (int, error) {
	count, err := c.query.Count(c.tx, "nodes", "")
	return count, errors.Wrap(err, "failed to count existing nodes")
}

// NodeIsEmpty returns an empty string if the node with the given ID has
// nothing associated with it. Otherwise, it returns a message saying what's left.
func (c *ClusterTx) NodeIsEmpty(id int64) (string, error) {
	// Note: there is currently nothing at the moment to identify if a node is
	// empty, so we just return nothing.
	return "", nil
}

// NodeIsOutdated returns true if there's some cluster node having an API or
// schema version greater than the node this method is invoked on.
func (c *ClusterTx) NodeIsOutdated() (bool, error) {
	nodes, err := c.nodes(false, "")
	if err != nil {
		return false, errors.Wrap(err, "failed to fetch nodes")
	}
	// Figure our own version.
	ver := [2]int{}
	for _, node := range nodes {
		if node.ID == c.nodeID {
			ver = node.Version()
		}
	}
	// A zero schema or API version means our own row was never found above.
	if ver[0] == 0 || ver[1] == 0 {
		return false, errors.Errorf("inconsistency: local node not found")
	}
	// Check if any of the other nodes is greater than us.
	for _, node := range nodes {
		if node.ID == c.nodeID {
			continue
		}
		n, err := version.CompareVersions(node.Version(), ver)
		if err != nil {
			return false, errors.Wrapf(err, "failed to compare with version of node %s", node.Name)
		}
		if n == 1 {
			// The other node's version is greater than ours.
			return true, nil
		}
	}
	return false, nil
}

// NodeUpdateVersion updates the schema and API version of the node with the
// given id. This is used only in tests.
func (c *ClusterTx) NodeUpdateVersion(id int64, version [2]int) error {
	stmt := "UPDATE nodes SET schema=?, api_extensions=? WHERE id=?"
	result, err := c.tx.Exec(stmt, version[0], version[1], id)
	if err != nil {
		return errors.Wrap(err, "failed to update nodes table")
	}
	n, err := result.RowsAffected()
	if err != nil {
		return errors.Wrap(err, "failed to get affected rows")
	}
	if n != 1 {
		return errors.Errorf("expected exactly one row to be updated")
	}
	return nil
}

// NodeOfflineThreshold returns the amount of time that needs to elapse after
// which a series of unsuccessful heartbeats will make the node be considered
// offline.
func (c *ClusterTx) NodeOfflineThreshold() (time.Duration, error) {
	// Fall back to the compile-time default unless the config table overrides it.
	threshold := time.Duration(ClusterDefaultOfflineThreshold) * time.Second
	values, err := c.query.SelectStrings(
		c.tx, "SELECT value FROM config WHERE key='cluster.offline_threshold'")
	if err != nil {
		return -1, errors.WithStack(err)
	}
	if len(values) > 0 {
		// The config value is stored as a number of seconds.
		seconds, err := strconv.Atoi(values[0])
		if err != nil {
			return -1, errors.WithStack(err)
		}
		threshold = time.Duration(seconds) * time.Second
	}
	return threshold, nil
}

// Nodes returns all nodes part of the cluster.
func (c *ClusterTx) nodes(pending bool, where string, args ...interface{}) ([]NodeInfo, error) {
	var nodes []NodeInfo
	// dest appends a fresh NodeInfo per row and hands back pointers to its
	// fields, in the same order as the SELECT column list below.
	dest := func(i int) []interface{} {
		nodes = append(nodes, NodeInfo{})
		return []interface{}{
			&nodes[i].ID,
			&nodes[i].Name,
			&nodes[i].Address,
			&nodes[i].Description,
			&nodes[i].Schema,
			&nodes[i].APIExtensions,
			&nodes[i].Heartbeat,
		}
	}
	// The pending flag is always the first bound parameter; extra WHERE
	// arguments (if any) follow it.
	if pending {
		args = append([]interface{}{1}, args...)
	} else {
		args = append([]interface{}{0}, args...)
	}
	stmt := `SELECT id, name, address, description, schema, api_extensions, heartbeat FROM nodes WHERE pending=? `
	if where != "" {
		stmt += fmt.Sprintf("AND %s ", where)
	}
	stmt += "ORDER BY id"
	err := c.query.SelectObjects(c.tx, dest, stmt, args...)
	return nodes, errors.Wrap(err, "failed to fetch nodes")
}
Agglomeration Externalities and Skill Upgrading in Local Labor Markets: Evidence from Prefecture-Level Cities of China Skill upgrading, the increase in the percentage of skilled workers in the employment population, boosts the economic growth of developing countries and sustains their industrial competitiveness. The international economics literature discusses the effects of international trade on skill upgrading, ignoring the potential role of agglomeration externalities. This paper takes China as a case study, which has been encountering a serious challenge about how to strengthen its industrial competitiveness in the world through skill upgrading as its population dividend decreases. The panel data of 2005, 2010 and 2015 from prefecture-level cities in China were used for regression analysis to explore the benefits from agglomeration externalities, including specialization and diversification effects, on skill upgrading. The results show that both the specialization effect and diversification effect do promote skill upgrading. Furthermore, there are significant differences in the influence of local agglomeration externalities across different regions, and the positive effect brought about by specialization externalities is usually dominant in undeveloped, inland or small cities, compared with the diversification in developed or coastal cities. Besides, manufacturing agglomerations exhibit positive externalities to skill upgrading mainly through specialization, while the service agglomerations mainly promote skill upgrading by means of diversification.
<filename>var/spack/repos/builtin/packages/r-quantmod/package.py
# Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class RQuantmod(RPackage):
    """Specify, build, trade, and analyse quantitative financial trading
    strategies."""

    homepage = "http://www.quantmod.com/"
    # URL of the default (0.4-5) source tarball on CRAN.
    url = "https://cran.r-project.org/src/contrib/quantmod_0.4-5.tar.gz"
    # CRAN archive directory used to locate older releases.
    list_url = "https://cran.r-project.org/src/contrib/Archive/quantmod"

    # Known releases; the second argument is the MD5 checksum of the tarball.
    version('0.4-10', 'e4119c673567801eee16dcbbd0265de8')
    version('0.4-5', 'cab3c409e4de3df98a20f1ded60f3631')

    # R package dependencies, required both at build and at run time.
    depends_on('r-xts', type=('build', 'run'))
    depends_on('r-zoo', type=('build', 'run'))
    depends_on('r-ttr', type=('build', 'run'))
    depends_on('r-curl', type=('build', 'run'))
def quadrature_scheme_lognormal_quantiles(
    loc, scale, quadrature_size, validate_args=False, name=None):
  """Builds a quadrature grid and uniform weights from LogNormal quantiles.

  The LogNormal(loc, scale) distribution (Exp-transformed Normal) is split
  into `quadrature_size` equal-probability bins; each grid point is the
  midpoint of two adjacent quantiles and carries the same probability mass
  `1 / quadrature_size`.

  Args:
    loc: Floating-point `Tensor`; location of the underlying Normal.
    scale: Floating-point `Tensor`; scale of the underlying Normal.
    quadrature_size: Python `int` number of quadrature grid points.
    validate_args: Python `bool` forwarded to the distribution constructor.
    name: Optional Python `str` name for the op scope.

  Returns:
    grid: `Tensor` of shape `batch_shape + [quadrature_size]` holding the
      quadrature abscissae.
    probs: `Tensor` of shape `[quadrature_size]` with uniform probabilities.
  """
  with tf.name_scope(name or 'quadrature_scheme_lognormal_quantiles'):
    # LogNormal = Exp(Normal(loc, scale)).
    dist = transformed_distribution.TransformedDistribution(
        distribution=normal.Normal(loc=loc, scale=scale),
        bijector=exp_bijector.Exp(),
        validate_args=validate_args)

    batch_ndims = tensorshape_util.rank(dist.batch_shape)
    if batch_ndims is None:
      # Static batch rank unknown; fall back to the dynamic rank.
      batch_ndims = tf.shape(dist.batch_shape_tensor())[0]

    def _compute_quantiles():
      """Computes quantiles at quadrature_size + 1 interior edge points."""
      # Sample quadrature_size + 3 equally spaced points in (0, 1) and drop
      # the endpoints {0, 1}, whose quantiles would be -inf / +inf.
      zero = tf.zeros([], dtype=dist.dtype)
      edges = tf.linspace(zero, 1., quadrature_size + 3)[1:-1]
      # Reshape edges so they broadcast against the batch dimensions.
      edges = tf.reshape(
          edges,
          shape=tf.concat(
              [[-1], tf.ones([batch_ndims], dtype=tf.int32)], axis=0))
      quantiles = dist.quantile(edges)
      # Cyclically permute left by one so the quantile axis is last.
      perm = tf.concat([tf.range(1, 1 + batch_ndims), [0]], axis=0)
      quantiles = tf.transpose(a=quantiles, perm=perm)
      return quantiles

    quantiles = _compute_quantiles()

    # Grid points are the midpoints of adjacent quantiles.
    grid = (quantiles[..., :-1] + quantiles[..., 1:]) / 2.
    # Set static shape hints where the batch shape is known.
    new_shape = tensorshape_util.concatenate(dist.batch_shape,
                                             [quadrature_size])
    tensorshape_util.set_shape(grid, new_shape)

    # By construction each bin has identical mass, i.e. 1 / quadrature_size.
    probs = tf.fill(
        dims=[quadrature_size],
        value=tf.math.reciprocal(tf.cast(quadrature_size, dist.dtype)))

    return grid, probs
"""
Regression tests for the interaction between model inheritance and
select_related().
"""
# NOTE(review): Python-2-era Django code — __unicode__ methods and
# ForeignKey without on_delete indicate Django < 2.0; the old-style
# declarations are intentional for this regression fixture.

from django.db import models


class Place(models.Model):
    # Base model; Restaurant inherits from it via multi-table inheritance.
    name = models.CharField(max_length=50)

    class Meta:
        ordering = ('name',)

    def __unicode__(self):
        return u"%s the place" % self.name


class Restaurant(Place):
    # Child model in the inheritance chain exercised by select_related().
    serves_sushi = models.BooleanField()
    serves_steak = models.BooleanField()

    def __unicode__(self):
        return u"%s the restaurant" % self.name


class Person(models.Model):
    # References the child model so select_related() must traverse the
    # inherited Place row as well.
    name = models.CharField(max_length=50)
    favorite_restaurant = models.ForeignKey(Restaurant)

    def __unicode__(self):
        return self.name
Since Albert Einstein first predicted their existence a century ago, physicists have been on the hunt for gravitational waves, ripples in the fabric of spacetime. That hunt is now over. Gravitational waves exist, and we’ve found them. That’s according to researchers at the Laser Interferometer Gravitational Wave Observatory (LIGO), who have been holed up for weeks, working round-the-clock to confirm that the very first direct detection of gravitational waves is the real deal. False signals have been detected before, and even though the rumors first reported by Gizmodo have been flying for a month, the LIGO team wanted to be absolutely certain before making an official announcement. That announcement has just come. Gravitational waves were observed on September 14th, 2015, at 5:51 am ET by both of the LIGO detectors, located in Livingston, Louisiana, and Hanford, Washington. The source? A collision of two black holes that took place 1.3 billion years ago. When it occurred, about three times the mass of the sun was converted to energy in a fraction of a second. The discovery has been accepted for publication in Physical Review Letters. Gravitational waves are ripples in the universe caused by some of the most energetic cosmic events, from exploding stars to supermassive black hole mergers. As they propagate through space and time, gravitational waves cause tiny tremors in atoms that make up matter. While Einstein predicted them in his general theory of relativity in 1916, and their existence was indirectly demonstrated in the 1980s, it wasn’t until the LIGO detector came online in 2002 that the hunt for elusive spacetime ripples started to get serious. But the first generation LIGO experiment, which ran for eight years, wasn’t sensitive enough. Which is understandable.
Gravitational waves are minuscule— the atomic jitters that pass through our world when two black holes bash together in a distant galaxy are on the order of a billionth of a billionth the diameter of an atom. LIGO detects them by proxy, using high powered lasers to measure tiny changes in the distance between two objects positioned thousands of miles apart. A million things can screw this up, including a rumbling freight train, a tremor in the Earth, and the inconvenient reality that all objects with a temperature above absolute zero are vibrating all the time. After a series of upgrades that lasted from 2010 to 2015, LIGO was back online this past fall. With more powerful lasers and an improved system for isolating the experiment from vibrations in the ground, the prospects of detecting the first gravitational waves have never looked better. Some scientists even predicted that we’d have our first positive detection in 2016—but few could have known how quickly it would come. In fact, LIGO saw gravitational waves almost immediately. The team then spent the entire fall exhaustively investigating potential instrumental and environmental disturbances to confirm that the signal was real. According to Einstein’s theory of relativity, when a pair of black holes orbit one another, they lose energy slowly, causing them to creep gradually closer. In the final minutes of their merger, they speed up considerably, until finally, moving at about half the speed of light, they bash together, forming a larger black hole. A tremendous burst of energy is released, propagating through space as gravitational waves. The two black holes behind all the hubbub are 29 and 36 times the mass of the Sun, respectively. During the peak of their cosmic collision, LIGO researchers estimate that their power output was 50 times that of the entire visible universe. The discovery of gravitational waves has been an open secret for weeks now.
The scientists’ own excitement got the better of them on several occasions, including last week, when theoretical physicist Clifford Burgess at McMaster University in Hamilton, Canada, sent an email to his entire department, telling them that LIGO had found a real, and “spectacular,” signal of two large black holes merging. Now, the muzzle has been lifted and the physicists can geek out at the top of their lungs. Keep an eye on social media today; it should be a ruckus. The discovery of gravitational waves confirms an important aspect of the theory of relativity, but it does much more than that. Quite literally, it opens up a new chapter in our exploration of the cosmos, one where electromagnetic radiation is no longer our only tool for “seeing” the universe. As MIT astrophysicist Scott Hughes told Gizmodo in a phone interview, we can use gravitational waves to probe mysterious celestial objects like black holes and neutron stars, which typically emit no light. Hughes also notes that once our detectors are sensitive enough to catch gravitational waves regularly, we can start to build a census of the universe’s most energetic events. “Actually getting some demographic data is one of the key things we hope to do in an era of detection,” he said. A century-long hunt is over. But a new cosmic exploration is just beginning.
from w1thermsensor import W1ThermSensor
import RPi.GPIO as GPIO
from datetime import datetime
import time
import sqlite3

# LED indicator pins (BCM numbering): blue = cold, orange = comfortable,
# red = hot.
GPIO.setmode(GPIO.BCM)
blue_led = 16
orange_led = 20
red_led = 21
GPIO.setup(blue_led, GPIO.OUT)
GPIO.setup(orange_led, GPIO.OUT)
GPIO.setup(red_led, GPIO.OUT)


def light_reset():
    """Turn all three indicator LEDs off."""
    GPIO.output(blue_led, GPIO.LOW)
    GPIO.output(orange_led, GPIO.LOW)
    GPIO.output(red_led, GPIO.LOW)


def blink_light(chosen_light, blinks=7, interval=0.25):
    """Blink `chosen_light` `blinks` times and leave it switched on.

    Replaces the original copy-pasted on/off sequence (7 identical
    cycles followed by a final HIGH) with a loop; defaults reproduce
    the original timing exactly.

    Args:
        chosen_light: BCM pin number of the LED to blink.
        blinks: number of on/off cycles before leaving the LED on.
        interval: seconds to hold each on/off state.
    """
    for _ in range(blinks):
        GPIO.output(chosen_light, GPIO.HIGH)
        time.sleep(interval)
        GPIO.output(chosen_light, GPIO.LOW)
        time.sleep(interval)
    # Leave the LED on between readings, as the original code did.
    GPIO.output(chosen_light, GPIO.HIGH)


def main():
    """Log 1-Wire temperature readings to SQLite and drive the status LEDs.

    Runs until interrupted; on Ctrl-C the database connection is closed
    and the GPIO pins are released.
    """
    db_filename = 'temp.db'
    connection = sqlite3.connect(db_filename)
    cursor = connection.cursor()
    cursor.execute('''CREATE TABLE if not exists stephs (
        time date primary key,
        temp float)''')
    sensor = W1ThermSensor()
    while True:
        try:
            temp = sensor.get_temperature(W1ThermSensor.DEGREES_F)
            print("writing " + str(temp))
            # Map the reading to an indicator LED.
            if temp < 70.0:
                chosen_light = blue_led
            elif temp <= 75.0:
                chosen_light = orange_led
            elif temp > 75.0:
                chosen_light = red_led
            else:
                # Only reachable for NaN readings; flash all LEDs as an
                # error signal and skip this sample (the original code
                # fell through with `chosen_light` unbound).
                light_reset()
                GPIO.output(blue_led, GPIO.HIGH)
                GPIO.output(orange_led, GPIO.HIGH)
                GPIO.output(red_led, GPIO.HIGH)
                time.sleep(2)
                continue
            light_reset()
            GPIO.output(chosen_light, GPIO.HIGH)
            # Parameterized INSERT keeps the timestamp/reading safe to store.
            cursor.execute("INSERT INTO stephs (time, temp) VALUES (?, ?)",
                           (datetime.now(), temp))
            connection.commit()
            time.sleep(2)
            blink_light(chosen_light)
        except KeyboardInterrupt:
            # Clean up and leave the loop; the original kept looping and
            # would have reused the closed connection on the next pass.
            connection.close()
            GPIO.cleanup()
            break


if __name__ == "__main__":
    main()
/* tslint:disable */
/**
 * This is an autogenerated file created by the Stencil compiler.
 * It contains typing information for all components that exist in this project.
 */
import '@stencil/core';

import {
  EventEmitter,
} from '@stencil/core';


export namespace Components {
  /** Properties of the <sach-mention> component instance. */
  interface SachMention {
    /**
     * Set the amount of time, in milliseconds, to wait to trigger the `onChange` event after each keystroke.
     */
    'debounce': number;
    /** Character that triggers a mention lookup — presumably "@"; confirm in the component source. */
    'delimiter': string;
    /** Key/value pairs the mention menu is matched against. */
    'dictionary': Array<{ key: string; value: string }>;
    /**
     * if true ignores casing when matching strings
     */
    'ignoreCase': boolean;
    /** Handler invoked when a menu item is clicked. */
    'itemClick': any;
    /** Renders a single menu item from a dictionary entry as an HTML string. */
    'itemTemplate': (key: any, value: any) => string;
    /** Renders the surrounding menu as an HTML string. */
    'menuTemplate': (value: any) => string;
    /** Minimum number of typed characters before the lookup starts — TODO confirm. */
    'searchTermLength': number;
  }

  /** JSX attribute form of SachMention: same members, all optional, plus events. */
  interface SachMentionAttributes extends StencilHTMLAttributes {
    /**
     * Set the amount of time, in milliseconds, to wait to trigger the `onChange` event after each keystroke.
     */
    'debounce'?: number;
    'delimiter'?: string;
    'dictionary'?: Array<{ key: string; value: string }>;
    /**
     * if true ignores casing when matching strings
     */
    'ignoreCase'?: boolean;
    'itemClick'?: any;
    'itemTemplate'?: (key: any, value: any) => string;
    'menuTemplate'?: (value: any) => string;
    /**
     * Emitted when a keyboard input occurred.
     */
    'onInputEvent'?: (event: CustomEvent<KeyboardEvent>) => void;
    'onOnChange'?: (event: CustomEvent<string>) => void;
    'onOnFocus'?: (event: CustomEvent<void>) => void;
    'searchTermLength'?: number;
  }
}

declare global {
  /** Registration of the component interface with Stencil's global maps. */
  interface StencilElementInterfaces {
    'SachMention': Components.SachMention;
  }

  interface StencilIntrinsicElements {
    'sach-mention': Components.SachMentionAttributes;
  }

  /** DOM element type for <sach-mention>. */
  interface HTMLSachMentionElement extends Components.SachMention, HTMLStencilElement {}
  var HTMLSachMentionElement: {
    prototype: HTMLSachMentionElement;
    new (): HTMLSachMentionElement;
  };

  interface HTMLElementTagNameMap {
    'sach-mention': HTMLSachMentionElement
  }

  interface ElementTagNameMap {
    'sach-mention': HTMLSachMentionElement;
  }

  /** JSX typings so the tag is accepted in TSX files. */
  export namespace JSX {
    export interface Element {}
    export interface IntrinsicElements extends StencilIntrinsicElements {
      [tagName: string]: any;
    }
  }

  export interface HTMLAttributes extends StencilHTMLAttributes {}
}
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file '/media/raul/OS/Users/king_/Desktop/carrera/curso2018-2019/2oCuatri/TFG/gui_dbjudge/sql_judge/view/qt_view/questions/question_row.ui'
#
# Created by: PyQt5 UI code generator 5.13.2
#
# WARNING! All changes made in this file will be lost!

from PyQt5 import QtCore, QtGui, QtWidgets


class Ui_QuestionRow(object):
    """Auto-generated layout for one question row: question and answer
    labels plus keyword/delete buttons.

    NOTE(review): regenerate from question_row.ui rather than editing by
    hand — the generator overwrites this file.
    """

    def setupUi(self, QuestionRow):
        # Top-level widget: 736x112 by default, Preferred/Preferred policy.
        QuestionRow.setObjectName("QuestionRow")
        QuestionRow.resize(736, 112)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(QuestionRow.sizePolicy().hasHeightForWidth())
        QuestionRow.setSizePolicy(sizePolicy)
        # Outer grid that owns the widget and holds the inner layout.
        self.gridLayout_2 = QtWidgets.QGridLayout(QuestionRow)
        self.gridLayout_2.setSizeConstraint(QtWidgets.QLayout.SetMinimumSize)
        self.gridLayout_2.setObjectName("gridLayout_2")
        # Inner grid: question label | answer label | spacer | button column.
        self.gridLayout = QtWidgets.QGridLayout()
        self.gridLayout.setSizeConstraint(QtWidgets.QLayout.SetMaximumSize)
        self.gridLayout.setHorizontalSpacing(20)
        self.gridLayout.setObjectName("gridLayout")
        spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.gridLayout.addItem(spacerItem, 0, 3, 1, 1)
        # Answer text label; content is filled in at runtime, hence setText("").
        self.answer_label = QtWidgets.QLabel(QuestionRow)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(1)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.answer_label.sizePolicy().hasHeightForWidth())
        self.answer_label.setSizePolicy(sizePolicy)
        self.answer_label.setMinimumSize(QtCore.QSize(40, 40))
        self.answer_label.setLayoutDirection(QtCore.Qt.LeftToRight)
        self.answer_label.setText("")
        self.answer_label.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
        self.answer_label.setObjectName("answer_label")
        self.gridLayout.addWidget(self.answer_label, 0, 2, 2, 1)
        # Button column with its own grid: keywords button | spacer | delete button.
        self.gridLayout_3 = QtWidgets.QGridLayout()
        self.gridLayout_3.setSizeConstraint(QtWidgets.QLayout.SetMinimumSize)
        self.gridLayout_3.setObjectName("gridLayout_3")
        spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.gridLayout_3.addItem(spacerItem1, 0, 1, 1, 1)
        self.keywords_button = QtWidgets.QPushButton(QuestionRow)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.keywords_button.sizePolicy().hasHeightForWidth())
        self.keywords_button.setSizePolicy(sizePolicy)
        self.keywords_button.setObjectName("keywords_button")
        self.gridLayout_3.addWidget(self.keywords_button, 0, 0, 1, 1)
        self.delete_button = QtWidgets.QPushButton(QuestionRow)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.delete_button.sizePolicy().hasHeightForWidth())
        self.delete_button.setSizePolicy(sizePolicy)
        self.delete_button.setObjectName("delete_button")
        self.gridLayout_3.addWidget(self.delete_button, 0, 2, 1, 1)
        self.gridLayout.addLayout(self.gridLayout_3, 0, 4, 2, 1)
        # Question text label; content is filled in at runtime.
        self.question_label = QtWidgets.QLabel(QuestionRow)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(1)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.question_label.sizePolicy().hasHeightForWidth())
        self.question_label.setSizePolicy(sizePolicy)
        self.question_label.setMinimumSize(QtCore.QSize(40, 40))
        self.question_label.setText("")
        self.question_label.setObjectName("question_label")
        self.gridLayout.addWidget(self.question_label, 0, 0, 2, 2)
        # Relative column widths: the answer column (2) gets double weight.
        self.gridLayout.setColumnStretch(0, 1)
        self.gridLayout.setColumnStretch(1, 1)
        self.gridLayout.setColumnStretch(2, 2)
        self.gridLayout.setColumnStretch(4, 1)
        self.gridLayout_2.addLayout(self.gridLayout, 0, 0, 1, 1)

        self.retranslateUi(QuestionRow)
        QtCore.QMetaObject.connectSlotsByName(QuestionRow)

    def retranslateUi(self, QuestionRow):
        # Spanish UI strings; runtime text must stay exactly as generated.
        _translate = QtCore.QCoreApplication.translate
        QuestionRow.setWindowTitle(_translate("QuestionRow", "Form"))
        self.keywords_button.setText(_translate("QuestionRow", "Palabras clave"))
        self.delete_button.setText(_translate("QuestionRow", "Borrar pregunta"))
Effect of Text Coherence on Item Difficulty for the Most Difficult Questions in the 2019 College Scholastic Aptitude Test This study was motivated by the notorious difficulty of the English section of the 2019 College Scholastic Aptitude Test (CSAT). While there have been a large number of studies examining what factors determine the difficulty of ESL reading questions, text coherence has been paid little attention due to the highly linguistic orientation of the language testing field. It was generally agreed in many studies that text itself does not bear a direct relationship with its coherence. Instead, the coherence reflects the reader's mental representation of the text elicited from it. During the process of drawing the mental representation, the reader both depends on the information explicitly presented in the text and makes necessary inferences to fill in the missing parts that he or she failed to discover from the text. In order to explore the influence of text coherence on the difficulty of the reading questions, this study analyzed the most difficult five questions of the English section of the 2019 CSAT using the text analysis paradigm of Mann and Thompson's Rhetorical Structure Theory and attempted to identify whether there are any elements to interfere with the drawing of the mental representation and whether the interrupting elements could lead to a low correct response rate. The analyses demonstrated that the texts for all five questions include some structural elements that might lead many test-takers to trouble in drawing the mental representation and, consequently, in choosing correct answers.
The decision will be made by the City Council, which will debate the issue on Tuesday. City officials say the full closure would shorten construction from 34 months to 24 months and shave $4 million off the cost, projected at between $90 million and $125 million. The updated plan also calls for building a ballroom now, instead of in a second phase – a change that could save the city another $15 million or more – if local hotels help pay for it. The renovation also would add exhibit space, meeting rooms, a new kitchen and lobbies so that the center would have less down time between conventions. Mayor Darrell Steinberg, who pushed for a slimmed-down expansion to free up money for other tourist destination projects, supports the accelerated schedule. There is no ideal solution to the construction, says Mike Testa, president and CEO of Visit Sacramento, which books events in the center. But after talking to major customers and officials in other cities that recently expanded their convention halls, the conclusion was that faster completion would mean less lost business, he said. Under the current timetable, construction would start in spring 2018 and end in late fall 2020. Under the new schedule, some secondary work would not begin until January 2019. The center would close in July 2019 and reopen for business in November 2020. Just how painful depends on how many events the center loses. Testa says Visit Sacramento is pushing for the heavy construction not to begin until July 2019 because several major conventions are scheduled for the first half of that year. Avoiding pain also depends on making a smooth transition to other venues for the 50-plus events scheduled during the construction. Testa says he is close to deals with three big clients to move their conventions. The main replacement sites are Cal Expo and Sleep Train Arena in Natomas. While Cal Expo has the space, it will need to be spruced up to have similar aesthetics to the convention center, he says. 
Sleep Train has been shuttered since December 2016 so may need some upkeep as well. Using it this way should only be a stopgap; there still needs to be a reuse or redevelopment, and soon. While Golden 1 Center is also under consideration, there’s a bigger challenge due to scheduling. With the arena busy with Kings games and concerts, it’s difficult to get several days in a row for a convention. Golden 1, however, does have the advantage of keeping convention-goers downtown. Michael Ault, executive director of the Downtown Sacramento Partnership, says his group is still reviewing the proposed schedule and says the impact on tenants and nearby businesses must be weighed and minimized. The city says that the $4 million in construction savings would more than offset lower hotel tax revenues while the center is closed. The upside of finishing the project sooner would far exceed losses in rental fees at the center, or in receipts for downtown businesses, says Steve Hammond, who led Visit Sacramento for nearly 20 years before retiring in June. With a relatively small impact in the short term, “in the long term, it’s going to be a huge boon for downtown,” Hammond says. The construction schedule is the latest problem facing a project that has taken a long time to get going. The expanded center is the centerpiece of a shiny new tourist district, in concert with a makeover of the Community Center Theater. The theater will also go dark during some of the same time for its own renovation, but the city says that will reduce disruption to events there and at the convention center. The $83 million theater refurbishment is to start in fall 2018 and be complete by spring 2021. During their 2019-20 seasons, the California Musical Theatre, Sacramento Ballet and Sacramento Philharmonic & Opera would perform at Memorial Auditorium, which is undergoing improvements now. 
This timing issue is unfortunate, but when key projects have been stalled this long, it’s apparently the price of progress in Sacramento.
Effects of silage protein degradability and fermentation acids on metabolizable protein concentration: a meta-analysis of dairy cow production experiments. A meta-analysis was conducted using data from dairy cow production studies to evaluate silage metabolizable protein (MP) concentrations. The data consisted of 397 treatment means in 130 comparisons, in which the effects of silage factors (e.g., date of harvest, wilting, silage additives) were investigated. Within a comparison, a fixed amount of the same concentrate was fed. A prerequisite of data to be included in the analysis was that silage dry matter (DM), crude protein (CP), ammonia N, lactic acid (LA), and total acid (TA) concentrations and digestibility were determined. A smaller data set (n = 248) comprised studies in which silage water-soluble N concentration was also analyzed. The supply of MP was estimated as amino acids absorbed from the small intestine using a model with constant values for ruminal effective protein degradability (EPD) and intestinal digestibility of rumen undegraded protein. Microbial protein was calculated on the basis of digestible carbohydrates and rumen degradable protein (RDP). Alternative models were used to estimate microbial protein formation, assuming the energy values of RDP and TA to be equivalent to 1.00, 0.75, 0.50, 0.25, and 0 times that of digestible carbohydrates. Because EPD values are seldom determined in production trials, they were derived using empirical models that estimate them from other feed components. The goodness of fit of models was compared on the basis of root mean squared error (RMSE) of milk protein yield (MPY) predicted from MP supply (adjusted for random study effect) and Akaike's information criterion. Metabolizable protein supply calculated from basal assumptions predicted MPY precisely within a study (RMSE = 16.2 g/d). 
Variable contribution of RDP to the energy supply for microbial synthesis influenced the precision of MPY prediction very little, but RMSE for MPY increased markedly when the energy supply of rumen microbes was corrected for TA concentration. Using predicted rather than constant EPD values also increased RMSE of MPY prediction. These observations do not mean that the supply of MP from undegraded feed protein is constant. However, it suggests that our current methods overestimate the range in EPD values and that the techniques have so many inherent technical problems that they can mask the true differences between the feeds. Including new elements in feed protein evaluation models may not improve the precision of production response predictions unless the consequent effects on the supply of other nutrients are taken into account.
Squat Detection of Railway Switches and Crossings Using Wavelets and Isolation Forest Railway switches and crossings (S&Cs) are critical, high-value assets in railway networks. A single failure of such an asset could result in severe network disturbance and considerable economical losses. Squats are common rail surface defects of S&Cs and need to be detected and estimated at an early stage to minimise maintenance costs and increase the reliability of S&Cs. For practicality, installation of wired or wireless sensors along the S&C may not be reliable due to the risk of damages of power and signal cables or sensors. To cope with these issues, this study presents a method for collecting and processing vibration data from an accelerometer installed at the point machine to extract features related to the squat defects of the S&C. An unsupervised anomaly-detection method using the isolation forest algorithm is applied to generate anomaly scores from the features. Important features are ranked and selected. This paper describes the procedure of parameter tuning and presents the achieved anomaly scores. The results show that the proposed method is effective and that the generated anomaly scores indicate the health status of an S&C regarding squat defects. Introduction In recent years, rail transportation has gained significant attention due to its potential to relieve road and air congestion and environmental problems. Railway traffic in Europe has experienced a significant rise in both transporting passengers and freight in Europe. In EU-15 countries, the passenger-kilometres and the rail freight ton-kilometres increased 28% and 15%, respectively, between 1990 and 2007. The increased volumes of freight and passenger traffic are challenges that need to be addressed because they set higher requirements on the maintenance and renewal process. 
To keep the railway transportation efficient, comfortable and safe under such circumstances, innovative maintenance techniques of the critical components are vital. Railway switches and crossings (S&Cs) are important components of railway transportation infrastructure. A failure in the S&C could lead to delays globally in the system and considerable economical loss. Since S&Cs include movable parts, and are the discontinuous points of the rail geometry, they encounter high failure rates. Maintaining and renewing the S&Cs across the rail network is expensive. According to Cornish et al., S&Cs have consumed 24% of the maintenance and 23% of the renewal budget against only 5% of the track miles in the U.K. In 2018 alone, S&Cs cost 530 MSEK, which is around 10% of the entire maintenance budget in Sweden. In the worst case, such a failure could even result in catastrophic accidents due to derailments. Due to safety concerns and their high maintenance costs, monitoring the status of S&Cs and performing preventive maintenance is important. Many studies have been performed to monitor the status of S&Cs. Most of the studies use wayside mounted systems. Liu et al. experimented with two different systems. One was equipped with a 3D accelerometer and a speed detection sensor to describe crossing degradation, and the other was a video gauge system (VGS) to detect and quantify ballast conditions. However, the measurements were sensitive to the speed and the type of the train. Data from the same train type and with similar speeds were needed. Boogaard et al. presented a method of utilising both accelerometers and a strain gauge mounted 50 mm below the crossing frog. Only the vibration data of the furthest measuring point from the tip of the nose were presented in the study. The results showed the advantages of combining two different measuring methods for monitoring the crossing nose. However, the proposed approach was focused on measuring the dynamics of the frog in the S&C. 
Barkhordari et al. proposed a method of employing a wayside system to measure the track acceleration to monitor ballast degradation. However, this method does not provide continuous condition monitoring. Milosevic et al. developed a condition-monitoring approach of railway crossing geometry by using measured and simulated track responses. Kerrouche et al. proposed an experimental strain-measurement approach for monitoring the crossing nose of railway S&Cs. However, both of these studies focused on the crossing nose instead of the whole S&C. The Axle Box Acceleration (ABA) system can also be used to monitor the status of the S&C. Wei et al. evaluated the degradation at a railway crossing using ABA measurements. However, the study focused mainly on the uneven deformation between the wing rail and crossing nose and local irregularity in the longitudinal slope of the crossing nose. Squats are one type of rail defect. According to Grosonni et al., one-third of the recorded failures at the crossing panel are squat-related. Molodova et al. presented a series of studies on utilising ABA to explore the influence of different parameters and to implement an automatic squat-detection method. However, these studies are aimed at normal tracks, and the situation for an S&C is more complicated. In addition, the ABA signal is dependent on the property of the axle box, the condition of the wheel axle bearings and the wheel profiles. Cho proposed a similar method for detecting squat defects using the ABA measurement with signal processing and wavelet spectrum analysis. This study has the same drawback as the other ABA-based methods. As critical components in railway infrastructure, S&Cs are required to be reliable in order to prevent delays and avoid fatal accidents. Nowadays, manual inspection at fixed intervals is still the most commonly used way to assess the status of S&Cs. These manual inspections encounter human errors and can lead to severe accidents.
Manual inspection can also place inspectors in danger as regular physical access to the railway is inevitable. A plausible solution to this conundrum can be to automate the process of squat detection and monitor the health status of S&C to obtain more frequent updates of the status information, reduce the cost of inspections, reduce system down-time and increase safety. Anomaly-detection techniques are suitable for finding the segments of S&C that contain squats among the healthy data. In most of the prior studies using wayside monitoring techniques, sensors were either installed on the side, underneath the rails or mounted on the sleeper to collect the data. These approaches of installing wired or wireless sensors are not practical due to an increased risk of damaged power and signal cables or sensors themselves under normal operation or during maintenance activities. A possible solution to overcome this issue is to make use of the protective environment within the point machines to host the sensors. This study proposed an approach of positioning the accelerometer inside the point machine to estimate the overall health condition of the S&C. The accelerometer was installed on one rod of the point machine with customised aluminum holder. This positioning provides good protection for the accelerometer against harsh weather conditions. An electrical power supply is also easily accessible from the point machine. Previously, the sensors were either installed on the axle box of the train [13,, on the bogie of the train, directly on the rail or underneath the rail. This study proposes a new processing procedure which combines classical time-domain features with features derived from scale-averaged wavelet power (SAWP) with the help of wavelet techniques and utilises an unsupervised anomaly-detection algorithm called isolation forest to predict the anomaly score. This combination has not been yet utilised to process vibration data from the railway application. 
The previous studies used only time domain features and supervised machine-learning algorithms. The objective of this study is to enable continuous monitoring of the S&C to estimate its general health condition and to reduce the human interventions on track for the inspection purpose. Previous studies focused only on individual defects on normal rails. The rest of the paper is organised as follows. Section 2 presents the materials and methods. Section 3 presents the results and discussions. Section 4 presents the conclusions and the future works. Materials and Methods The basic idea behind the current study is that the vibration at the point machine is affected by the squats on the rail head of the S&C. Squats may lead to defects, which can result in system failure during normal railway operations. The vibration is the result of a dynamic response to the wheel-rail interaction. If the rail has squats, then the vibration signal will also change its property. Therefore, analysing the vibrations can be effective in estimating the health status of the S&C. The experiment for this study was carried out along a testbed including a full-scale S&C and a 6-tonne bogie wagon. Two levels of squats were introduced manually with 1 mm and 4 mm maximum depth. The vibration sensor is mounted at the point machine. Several signal-processing steps were applied to the original signal and 11 features were extracted for each segment of the signal. The features were the root mean square (RMS), standard deviation, shape factor, kurtosis, skewness, peak-to-peak amplitude, impulse factor, crest factor and clearance factor from time domain and the number of peaks and the total peak power from the SAWP. These features were used as input to an unsupervised anomaly-detection algorithm named isolation forest to predict if a section contained squat defects or not. By combining the results of each individual segment, the health condition of the whole S&C could be assessed. 
A detailed description of the methods used for this study is given in the sub-sections below. Track Layout and the Testbed In this study, an approach was presented to investigate how to detect and evaluate the health status of an S&C regarding squat defects by using unsupervised machine learning. The experiment was performed with a testbed located at Luleå University of Technology including a full-scale S&C and a 6-tonne bogie wagon. This bogie wagon has two axles, and the distance between them is 2.5 m. The S&C used has a dimension of 1:16 and a length of 38.14 m. The accelerometer was mounted on the point machine to provide a protective environment for the accelerometer and easy access to electricity. The vibration signal and the corresponding speed information were measured. The test site is shown in Figure 1 and an illustration of the testbed is shown in Figure 2. The squats were labelled from A to K. $S_0$ and $S_1$ were two stop blocks mounted on the two ends of the rails in the through direction. The point machine is 5.86 m from the stop block $S_0$. To simulate two different squat levels, the squats were manually introduced stepwise with 1 mm and 4 mm maximum depths. The dimensions of the squats with two different levels were measured and are presented in Table 1. The sensor used was KS91C. It has a measuring range of 0.3-37,000 Hz, the sensitivity was 10 mV/g (± 20%) and the resonant frequency was greater than 60 kHz (+25 dB). The position of the accelerometer is visualised in Figures 2 and 3. The vibration in the z-direction was measured. The accelerometer was glued to the aluminum holder which was mounted on one rod of the point machine. Test Procedure and Data Acquisition The experiment was performed as follows. Three different test cases were performed. The bogie wagon travelled from $S_0$ to $S_1$ without squats, with squats of 1 mm depth and with squats of 4 mm depth. Each test case was repeated 3 times. In total, 9 instances were recorded. 
The vibration data were measured with the accelerometer installed at the point machine. A data acquisition platform DAQ9174 was utilised to capture the vibration data and feed them to the computer directly. The sampling frequency of the platform was 51.2 kHz. The speed was measured with a customised tachometer with Hall effect sensor A3144 and neodymium magnets. An Arduino Uno unit was utilised to send the revolution per minute (RPM) data of the left back wheel via WiFi to the computer. The controlling system was implemented in VI code running in LabVIEW 2019. Signal-Processing Procedure The signal-processing procedure for this study is described in Figure 4. The vibration signals were initially filtered with a third-order Butterworth band-pass filter with 50 Hz and 2.5 kHz cutoff frequencies. The band-pass filter was used to filter away the frequencies with noise and preserve the useful information. A wavelet magnitude scalogram was utilised as a tool to help decide the cutoff frequency of the band-pass filter. The process is explained by using the following example. A piece of vibration data with a squat defect was extracted and evaluated with wavelet transform. Figure 5 presents the wavelet magnitude scalogram of squat G in a 4 mm case. It showed that the main energy of the response for the squat defect was around 200 Hz to 400 Hz. There was also a second frequency band around 500 Hz to 2000 Hz. This implied how the band-pass filter should be designed. The filtered signals were aligned and truncated to equal length. This step makes it possible to compare the results from different runs in the results. It could also be utilised in future studies to accurately extract the position information. Further, the signal was down-sampled to one-tenth of the original frequency. As the band-pass filter has a cutoff frequency as high as 2.5 kHz, the original signal with sampling frequency at 51.2 kHz contains redundant information. 
A sampling frequency at 5 kHz was enough to preserve all the useful information. To make the calculation easier, 5.12 kHz was applied. The output signals were processed in two separate paths after that. On one path, the signals were directly segmented into 400 equal-sized segments and 9 corresponding time-domain features were extracted. The features used in this study were RMS, standard deviation, shape factor, kurtosis, skewness, peak-to-peak amplitude, impulse factor, crest factor and clearance factor. On the other path, wavelet denoising was applied. The denoising was set at a level 9 decomposition, with Symlet 4 wavelet, Empirical Bayesian denoise method with median thresholding and level-dependent noise estimator. The SAWP was calculated from the output signal. Two features, the number of peaks and the total peak power, were extracted from the SAWP time series and assigned to each segment. In total, 11 features were generated. The extracted features are also described in Table 2. Wavelets The concept of wavelet transform can be traced back to 1909 when Haar introduced the first wavelet. Wavelet transform can be divided into two categories, namely, continuous wavelet transform (CWT) and discrete wavelet transform (DWT). CWT is a very powerful tool for time-frequency analysis and can be viewed as replacing the short-time Fourier transform's "time-frequency window" $g_{t,\omega}$ with a "time-scale window" $\psi_{a,b}$. However, calculating all wavelet coefficients at all scales is computationally expensive, and it contains a high amount of redundant information. DWT is a good alternative in some cases. DWT works similarly to a band-pass filter and it can be performed for a signal on several levels. Each level decomposes the original signal into approximations (the low-frequency part) and details (the high-frequency part). The next level of DWT is carried out on the approximations of the previous level. 
Mathematically, the DWT of a function $f(x)$ is defined as the integral transform of $f(x)$ with wavelet functions $\psi_{a,b}(x)$, when scales and positions are based on powers of two. It is defined as follows: $W(a,b) = \int_{-\infty}^{\infty} f(x)\,\psi_{a,b}(x)\,\mathrm{d}x$, where $\psi_{a,b}(x) = \frac{1}{\sqrt{a}}\,\psi\!\left(\frac{x-b}{a}\right)$. Here $a$ is called the scale factor and represents the scaling of the function, and $b$ is called the shift factor and represents the temporal offset of the function. Wavelet denoising utilises DWT to decompose the original signal to obtain the wavelet coefficients, thresholding the coefficients and reconstructing the signal with inverse DWT. Wavelet denoising has been widely utilised to denoise different vibration signals. Chen et al. proposed a wavelet denoising method for the vibration signals collected from wind turbines. Chegini et al. proposed a new application using empirical wavelet transform denoising in bearing fault diagnosis. He et al. constructed a distributed acoustic sensor technology using multi-level wavelet decomposition denoising for condition monitoring of the heavy-haul railway. More details of the wavelet denoising technology implementation and application can be found in "Wavelet Denoising" by Luo and Zhang. When applying wavelet denoising, a few parameters and the thresholding method needed to be decided. The maximum level of decomposition depended on signal length (N). The data acquired yielded a maximum number of decomposition levels of 21. Levels of coefficients influenced the kurtosis of the signal. Increasing the number of levels of decomposition would lead to more aggressive denoising but also distort the output signal more. Empirical testing with different levels yielded 9. The wavelet function should reflect the features presented in the signal in the time domain. However, since the primary interest in this study was the SAWP time series, the different types of wavelet functions would yield the same qualitative results. Symlet 4 (sym4) was chosen. There were a few methods that could be used to determine the denoising thresholds. 
Empirical Bayesian, block James-Stein, false discovery rate, minimax estimation, Stein's unbiased risk estimation and universal threshold were tested. The influence on the SAWP is insignificant. Since the signal without noise was not available, a quantitative comparison could not be performed in this case. Empirical Bayesian with median thresholding was chosen. SAWP The SAWP time series over scales $s_{j_1}$ to $s_{j_2}$ is defined as follows: $\bar{W}_n^2 = \frac{\delta j\,\delta t}{C_\delta} \sum_{j=j_1}^{j_2} \frac{|W_n(s_j)|^2}{s_j}$, where $s_j = s_0 2^{j\,\delta j}$, $j = 0, 1, \ldots, J$. $C_\delta$ is scale independent and a constant for the selected wavelet function, $\delta j$ is a factor for scale averaging, $\delta t$ is the sampling period and $j_1, \ldots, j_2$ represent scales over which the SAWP is computed. $s_0$ is the smallest resolvable scale and $J$ determines the largest scale. $W_n(s)$ is the continuous wavelet transform of a discrete sequence. $N$ is the number of points in the time series. This can be utilised to examine fluctuations in power over a range of scales, which is exactly what was needed to detect the power burst in the vibration signal when a wheel hits a squat or a gap. This power time series will be utilised later to extract two peak-related features. The threshold for the detection of peaks was set to $2.5 \times 10^{-8}\,g^2$. The threshold was chosen empirically. Figure 6 shows an example of the identified peaks in a 4 mm squat depth case. The corresponding number of peaks in each segment and their total power were calculated. Isolation Forest An isolation forest is an unsupervised anomaly-detection technology based on the idea of isolating anomalies instead of profiling the normal points. Given a set of observations, the isolation forest algorithm selects a random sub-sample of the observations and assigns them to a binary tree. The algorithm starts by selecting a random feature from d-dimensional features. A split is then done on a random threshold in the range of the selected feature. 
If the value of one observation is less than the selected threshold, it goes to the left branch; otherwise, it goes to the right. With such an approach, a node is split into left and right branches. This process continues recursively until all data points are completely isolated or when the max depth is reached. The above steps are repeated to construct random binary trees until all observations are isolated. Those points that are easier to isolate and with smaller path lengths will thus have higher anomaly scores. A comprehensive description of the isolation forest algorithm is given by Liu F.T. et al. The 11 features extracted were first scaled using normalisation. The scaled features were evaluated and selected by using both PCA and Laplacian score. After applying the isolation forest algorithm, each segment received an anomaly score. The threshold of anomaly score to separate the healthy data and the anomalies was decided by finding the knee point. After the hyperparameters were decided, two possible indicators were proposed. Segmentation All signals were aligned to have the same starting points and truncated to 350,000 samples for each signal. The signal was segmented into 20 segments and the anomaly scores achieved cannot pinpoint the precise defect location. To be able to obtain more accurate positioning of anomalies the number of segments was increased to 200 and 400, respectively. In the 400 segment case, since the speed of the bogie never exceeds 2 m/s, each segment corresponds to around 0.17 s and will not be more than 0.34 m. This achieves a resolution that can be used in identifying the individual defects. The results of the influence of segment size are shown in Figure 7. Feature Extraction A total of 11 features were extracted from both the processed time-domain signal and the SAWP time series. The features can be grouped into two categories. 
The RMS, standard deviation, shape factor, kurtosis, skewness, peak to peak amplitude, impulse factor, crest factor and clearance factor are time-domain statistical features. The number of peaks and total peak power are extracted from the SAWP. All the extracted features are summarised in Table 2. Feature Scaling The two most used types of feature-scaling techniques are normalisation and standardisation. Normalisation is also referred to as max-min scaling, and standardisation is also referred to as Z-score normalisation. The normalisation scales the input feature values to the range of [0, 1], while standardisation converts the input feature values to obtain zero mean and a unit standard deviation. Since the PCA algorithm requires input features to have zero mean and a unit standard deviation, the features were standardised. Feature Selection Two different approaches for feature selection were utilised. The first method utilised PCA. The accumulated PCA feature importance score is presented in Figure 8. The first five features in the PCA space captured 96.55% of all the useful information. The second method employed the Laplacian score for feature selection. The redundant features were removed using the cross-correlation values between the features. Usually, the Laplacian score is defined as $L_r = 1 - s_r$, where $s_r$ is a score for each feature. However, MATLAB only uses the second term $s_r$ which represents the feature importance. Therefore, a lower Laplacian score is equivalent to a higher feature importance score, which indicates the corresponding feature is more important. The Laplacian feature importance was calculated and ranked using MATLAB and the results are shown in Figure 9. The correlations between the most significant feature and the others are calculated. The procedures for removing correlated features are as follows. The most important feature was selected and the cross correlation between it and the rest of the features was calculated. 
The features that had a higher correlation value than 0.9 with the most important feature were removed. This procedure was repeated for the second most important feature in the remaining feature set. This process stopped when there were no two features left that had a cross correlation value higher than 0.9. As a result, the remaining features are features 10, 11, 9, 8, 5 and 6. The results of utilising different groups of features were compared. Figure 10 shows an example of the anomaly scores for a test case with no squat with different feature groups. The anomaly scores using PCA space features and the Laplacian score-selected features are shifted down with 0.5 and 1 correspondingly for better visualisation. The numerical comparison using mean root squared error (MRSE) is presented in Table 3. The anomaly scores generated by using all features and the PCA space features are very similar. This can be explained because the selected 5 PCA space features explain 96.55% of the variance of all features combined. The anomaly scores generated by using Laplacian score-selected features and the PCA space features are also very similar. This shows both feature selection approaches generate similar anomaly scores and are acceptable. However, the anomaly scores generated by using all features and the Laplacian score-selected features are slightly more different with around double MRSE values. The Laplacian score approach only removed five redundant features that are highly correlated with the selected most important features and the anomaly score difference is still small. Plotting and comparing the anomaly scores for those two cases verifies that the difference is so small that it is reasonable to assume similar performance. From a performance point of view, either group of features could be utilised for further study. However, because of the curse of dimensionality, a higher dimension of features leads to exponentially increased computational efforts. 
Therefore, it is reasonable to choose either the PCA space features or the Laplacian score-selected features. A drawback with PCA space features is that the generated features are linear combinations of the original features and they become less interpretable and lose their physical meanings. Those two reasons combined justify that it is reasonable to choose Laplacian score-selected features for further study. Threshold for Anomaly Score A threshold should be provided to decide what an anomaly is. The descend-sorted anomaly scores against the index of all 9 instances are plotted in Figure 11. Each instance contains 400 segments and these 9 instances contain 3600 segments in total. The knee point method was applied and it was found that the point with index 436 was the knee. This corresponded to around 12% of the total segments. Therefore, the 88th percentile should be used as the threshold. This is verified by plotting the anomalies using the 88 percentile together with the vibration signal. An example of the results for a 4 mm case is presented in Figure 12. By using the 88th percentile, most of the anomalies were found without introducing unexpected false alarms. Anomaly Indicator for the Whole Switch All the test cases and the corresponding anomaly score above the threshold are presented in Figure 13. This shows clearly that with increased squat depth more anomalies are found, which indicates the health status of the S&C is degraded. It can also be observed that the different test runs were well aligned at the beginning; however, with a different speed profile for each run, the spotted defects also encounter a different drift. They are no longer well aligned after a while. This, however, would not influence the result as utilising the anomaly score as an indicator of the health status of the S&C. One indicator could be calculating the sum of all anomaly scores and using the mean value for each test scenario. 
From the test data, the observed scores are 11.65, 20.31 and 29.59 for the S&C with healthy, 1 mm deep and 4 mm deep squat cases. Another indicator could be the mean value of the number of anomalies for each test scenario. From the test data, the average number of anomalies was 18.67, 32.67 and 45.00 for the S&C with healthy, 1 mm deep and 4 mm deep squat cases. Conclusions and Future Works The present study demonstrates that it is possible to use the proposed method to extract features and utilise unsupervised anomaly-detection techniques, such as the isolation forest to detect the squat defects. The following conclusions can be drawn: The study shows that accelerometers placed within the protective environment within a point machine can be utilised for monitoring defects such as squats along the S&Cs of the railway infrastructure. The signal-processing procedure of extracting features from both the time domain vibration signal and the SAWP is effective and promising. Skewness, peak to peak amplitude, crest factor, clearance factor, Nr. of peaks and total peak power are ranked to be the top features for anomaly detection. An isolation forest algorithm is suitable for anomaly detection related to the squat defects. Since isolation forest is an unsupervised machine-learning technique, no labelled data are needed to train the model. By learning from the unlabelled data, a model is built and can be utilised to perform anomaly detection on the new data. It is promising to utilise such an approach to enhance the safety and reliability of S&C. One future study would be to verify the approach with data from S&C in a real railway network. Another interesting future study could be to take into consideration such parameters as train type, load and speed among the indicators and extend the method. 
The future study will also include enhancing the current data set and carrying out a comparative study where the results of the proposed unsupervised anomaly-detection model will be compared to other anomaly-detection methods such as neural networks. In the future, a nationwide conditionmonitoring system for S&Cs could be developed by combining such an approach and the concept of federated learning. Acknowledgments: Great thanks to Jan Lundberg and Taoufik Najeh for the design and building of the complete test rig and performing all experimental tests and collecting all data. Conflicts of Interest: The authors declare that there is no conflict of interest. Sample Availability: Samples of the compounds are available from the authors.
WASHINGTON – A Republican revolt stalled urgent efforts to lash together a national economic rescue plan Thursday, a chaotic turnaround on a day that had seemed headed for a success that President Bush, both political parties and their presidential candidates could celebrate at an extraordinary White House meeting. They gave up after 10 p.m. EDT, more than an hour after the lone House Republican involved, Rep. Spencer Bachus of Alabama, left the room. Democrats blamed the House Republicans for the apparent stalemate. Those conservatives have complained that the plan would be too costly for taxpayers and would be an unacceptable federal intrusion into private business. Talks were to resume this Friday morning on the effort to bail out failing financial institutions and restart the flow of credit that has begun to starve the national economy. The plan’s centerpiece still is for the government to buy the toxic, mortgage-based assets of shaky financial institutions in a bid to keep them from going under and setting off a cascade of ruinous events, including wiped-out retirement savings, rising home foreclosures, closed businesses, and lost jobs. The day’s White House summit, bringing together Bush, presidential rivals John McCain and Barack Obama, and top congressional leaders, had been aimed at showing unity in resolving a national financial crisis, but it broke up with conflicts in plain view. News of the apparent breakthrough helped inspire Wall Street, which recorded a 196-point gain for the Dow Jones industrials. The later breakdown seemed likely to send stocks downward again on Friday. One group of House GOP lawmakers circulated an alternative that would put much less focus on a government takeover of failing institutions’ sour assets. This proposal would have the government provide insurance to companies that agree to hold frozen assets, rather than have the U.S. purchase the assets. 
Rep. Eric Cantor, R-Va., said the idea would be to remove the burden of the bailout from taxpayers and place it, over time, on Wall Street instead. The price tag of the administration’s plan to bail out tottering financial institutions – and the federal intrusion into private business matters – have been major sticking points for many Republican lawmakers. “Sen. McCain and the president between them ought to be able to get House Republicans back to the table,” said Frank. “All of us around the table … know we’ve got to get something done as quickly as possible,” Bush told reporters, brought in for only the start of the meeting. Obama and McCain were at distant ends of the oval table, not even in each other’s sight lines. Bush, playing host in the middle, was flanked by Congress’ two Democratic leaders, House Speaker Nancy Pelosi and Senate Majority Leader Harry Reid. McCain and Obama later said they both still expected an agreement could be reached. Under the accord announced hours earlier among key lawmakers, the Treasury secretary would get $250 billion immediately and could have an additional $100 billion if he certified it was needed, an approach designed to give lawmakers a stronger hand in controlling the unprecedented rescue. The government would take equity in companies helped by the bailout and put rules in place to limit excessive compensation of their executives, according to a draft of the outline obtained by The Associated Press. McCain hoped voters would believe that he rose above politics to wade into successful, nitty-gritty dealmaking at a time of urgent crisis, but he risked being seen instead as either overly impulsive or politically craven, or both. Obama saw a chance to appear presidential and fit for duty, but was also caught off guard strategically by McCain’s surprising gamble in saying he was suspending his campaigning and asking to delay Friday night’s debate to focus on the crisis. 
Associated Press writers Deb Riechmann, Martin Crutsinger, Christopher Wills and Beth Fouhy in Washington and researcher Judy Ausuebel in New York contributed to this story.
President Obama signed an executive order Thursday aimed at linking high-speed broadband Internet providers and application developers with communities to design and test new applications in medicine, engineering and other fields. The idea behind the U.S. Ignite program is to use the broadband communities as test beds to develop applications that eventually can be scaled nationwide, according to a White House blog post. During a launch event, researchers at Case Western Reserve University demonstrated a broadband-based simulated surgical theater, which they said could eventually be used at broadband-equipped medical schools across the country. The U.S. Ignite program is designed to “unlock the power of American innovation mojo,” federal Chief Technology Officer Todd Park said during the event. Another portion of the executive order requires federal agencies to take a uniform approach to approving broadband building projects along federal properties and roadways, which the White House said could make broadband construction up to 90 percent cheaper. Broadband can run up to 100 times faster than the current Internet, allowing for much more complex operations. The U.S. Ignite partnership includes more than 25 cities, multiple corporations and nonprofits, and more than 60 research universities, the White House said. Applications developed through the program will focus on six key areas, according to a fact sheet: education and workforce development, advanced manufacturing, health, transportation, public safety, and clean energy. The National Science Foundation, the lead federal agency on the U.S. Ignite project, has committed to spending $20 million to support infrastructure building and application development, the fact sheet said. The investment is an outgrowth of NSF’s Global Environment for Networking Innovations, or GENI, project, which focused on bringing high-speed Internet to university researchers. 
“By bringing software developers and engineers from government and industry together with representatives from communities, schools, hospitals and other institutions that will benefit from faster and more agile broadband options, the partnership aims to speed up and increase the development of applications for advanced manufacturing, medical monitoring, emergency preparedness, and a host of other services,” the White House said. Thursday’s executive order also requires agencies to post federal properties that could be used for broadband deployment on their websites and to track regional broadband deployment projects on the Federal Infrastructure Projects Dashboard. (Image via asharkyu/Shutterstock.com)
HOMESTEAD, Fla. (AP) — Jimmie Johnson walked into a Hendrick Motorsports family reunion on pit road. Hall of Fame driver Jeff Gordon chatted near the No. 48 car while Johnson’s wife cradled crew chief Chad Knaus’ baby boy. They needed baby wipes as much as a checkered flag. “It’s all about this moment right now,” Gordon said. Brooke Knaus kissed her husband, and team owner Rick Hendrick stopped by to give good-luck hugs before he checked in on his other cars. The Johnsons and Knauses posed for family snapshots, and security held off scores of well-wishers wanting to sneak a closer look. The NASCAR season finale at Homestead-Miami Speedway has featured goodbyes in recent years, with stars like Gordon, Tony Stewart and Dale Earnhardt Jr. leaving the series. This moment had the feel of a retirement party for Johnson — but it was just the swan song for one of the most dominant driver-crew chief pairings in NASCAR history. Johnson and Knaus have parted ways after 17 years, seven NASCAR championships, and an unmatched mark of success in the sport over the last 20 years. They couldn’t grasp greatness one more time, finishing 14th in the finale and going winless in a season for the first time in Johnson’s Cup career. They hugged on pit road and toasted to their tenures with beers before they officially put their partnership on ice. “He is my brother, there is just no way around it,” Johnson said. Johnson turned the farewell weekend into a throwback party that celebrated his earlier years. The 43-year-old Johnson stunned the sport when he showed up at the track without his salt-and-pepper facial hair for the first time in nearly a dozen years. Johnson tweeted a photo of a razor with the caption, “I’m all in for the throwback weekend!” Johnson, a fitness freak, looked about as baby-faced as he did as a rookie in 2002. 
NASCAR poked fun at Johnson’s retro look with a tweet that said, “2002 called, it wants its look back.” Johnson’s former look still was stamped on his official emoji on his Twitter feed. He was clean shaven — and ready for a clean break from Knaus. The two are still close friends but there were no signs of tears or tissues as they talked shop for the last time as a team. Johnson slid into the Chevy — sporting a vintage Lowe’s paint scheme — and adjusted his helmet. Knaus took a knee by the car window on the grid and flipped through pages of the playbook that hid the secrets of their race strategy. Knaus smacked the top of Johnson’s helmet as he walked off to the pit stall. But the sights and sounds of a reason for their breakup enveloped the track: UFC ring announcer Bruce Buffer bellowed the race introductions for the four championship drivers. Defending series champion Martin Truex Jr. waved to the crowd on a parade lap as Knaus dabbed drool from son Kip’s chin. Once the most feared team in NASCAR, the 48 made a sudden dip into oblivion. Johnson’s 17th season has been his worst and he was knocked out of the playoffs in the first round, ending his run at a NASCAR-record eighth Cup championship. He hasn’t won since Dover 2017, an unthinkable string of 59 straight losses. Sponsor Lowe’s also is leaving NASCAR. Johnson will be funded by Ally Financial Inc. for all 38 races in the next two NASCAR seasons, the remaining years on Johnson’s contract. Ally ensures that Johnson will have just one look in what is probably his final two years chasing sole ownership of the championship record. “We can’t wait to see you rocking a No. 48 Ally car on the tracks in 2019,” the financial company tweeted. Johnson wore an old-school Lowe’s hat from 2002. “We can’t say enough good things about our #lowes48 guys and everyone at HMS. It’s been an incredible ride,” Lowe’s Racing tweeted. Johnson, Knaus and 17 crew members lined the 48 for a final team photo before the farewell race. 
Johnson will try and rebound in Daytona with new crew chief Kevin Meendering. Knaus, a Hendrick lifer, will call the shots for William Byron and the No. 24 team next season. “I’ve got a lot of work to do with my new team, we’ve got a lot of work to do with this new package and we’ve got to come out stronger,” Johnson said. But as much as the pomp marked the end of an era, Johnson isn’t going anywhere. Johnson, married and with two young daughters, is still driven to top Richard Petty and Dale Earnhardt and stand alone as NASCAR’s greatest champion. He may get No. 8 — just without Knaus.
Be scary good this Halloween by participating in Trick-or-Treat for UNICEF! It's a great opportunity to collect donations instead of candy or to bring your club’s members together for a fabulous event or party. All money collected by Key Club, Aktion Club, K-Kids or Builders Club members will support The Eliminate Project to help save and protect moms and babies from maternal and neonatal tetanus. Are you Trick-or-Treating for UNICEF with Circle K? Learn more here. HOW TO TAKE PART It’s easy! You can participate in five simple steps: Decide with your club what kind of fundraiser you’ll do. Order your collection boxes. Collect donations. Fill out the gift form and submit it with your club’s donations. Celebrate! You’re helping save and protect moms and babies. Don’t forget! Clubs that raise and submit more than US$250 by December 31 will receive a special banner patch. So, don’t forget to send in the funds you raise by the deadline! Expand your service power through Kiwanis Gives Online. This free fundraising platform is easy to start and will help increase donations for Trick-or-Treat for UNICEF.
. OBJECTIVE To investigate the clinical and biological characteristics and prognosis of mixed phenotype acute leukemia (MPAL). METHODS Thirty two patients were diagnosed as MPAL by bone marrow examination, immunophenotyping, cytogenetic and molecular assay and were treated with combined chemotherapy regimens for both acute lymphoblastic and acute myeloid leukemia. Two cases received allogeneic hematopoietic stem cell transplantation (allo-HSCT). RESULTS The incidence of MPAL in acute leukemias was 2.6%. There were 16 cases (50.0%) of mixed myeloid and B-lymphoid (M/B), 14(43.8%) myeloid and T-lymphoid (M/T), one each (3.1%) of trilineage (M/B/T) and B- and T-lymphoid (B/T) phenotype. The positive rates of CD34 and HLA-DR were 87.5% and 62.5%, respectively. Abnormal karyotypes were detected in 70.0% of 30 MPAL patients, which were structural and numerical abnormalities including t(9;22), 11q23 and complex karyotypes. The total complete remission (CR) rate was 75.0% and the overall survival (OS) and disease-free survival (DFS) at 2 years were 14.8% and 14.2% respectively. The CR rates for M/B and M/T cases were 75.0% and 71.4% respectively. No statistical difference was observed in OS and DFS between M/B and M/T cases. CONCLUSIONS MPAL is a rare type of acute leukemia with a high heterogeneity. The unfavorable indicators of MPAL may be factors such as abnormal karyotypes, high expression of CD34 and extramedullary infiltration. Combined regimens and more intensive therapy including allo-HSCT might contribute to improving survival.
24-hour blood pressure measurements: methodological and clinical problems. Awareness that sphygmomanometry is encompassed with serious limitations has led to the development of techniques that allow blood pressure (BP) to be monitored intraarterially or noninvasively during the day or over a 24-hour period. Although intraarterial BP monitoring allows an accurate evaluation of 24-hour BP mean and variability, its invasiveness prevents routine use in the clinical practice. This use can be more easily foreseen for noninvasive ambulatory BP monitoring, provided that the questions posed by this approach are answered. In the present study we show that the intermittent cuff inflations which allow BP to be measured noninvasively do not induce an alerting reaction and a BP rise in the patients, which means that this approach does not disrupt the daily BP profile. We also show that noninvasive BP monitoring does not alter the nocturnal hypotension, thus, preserving the day and night BP rhythm. Finally, we present evidence from a cross-sectional study that 24-hour BP monitoring reflects more closely the hypertension-related target organ damage than sphygmomanometric BP measurement and that target organ damage is related not only to average BP regimens but also to the degree of BP variability. Although support from prospective studies is necessary, this suggests that the diagnosis of hypertension and the prediction of its risk may be improved by ambulatory BP monitoring.
async def entry_card(user: User = AUTH_USER):
    """Return the entry-card status for the authenticated user.

    Combines two risk signals — the member's stored location and today's
    daily health report — into an IdentifiedUserEntryItem that says whether
    entry is allowed and why.

    NOTE(review): reconstructed from a single-line source; the placement of
    the vaccination check inside the "report present" branch is an
    assumption — confirm against the original file.
    """
    logger.info(f"Retrieving user entry status for school: {user.school}")
    # One query fetches the member node and (optionally) today's report.
    with Neo4JGraph() as graph:
        record = dict(list(graph.run(
            """MATCH (m:Member {email: $email,school: $school}) OPTIONAL MATCH (m)-[r:reported]-(d:DailyReport {date: $date}) RETURN r as report, m as member""",
            email=user.email, school=user.school, date=pst_date()))[0])
    # Location risk derives from the member's stored location field.
    location_risk = UserLocationItem().set_location(record['member']['location'])
    health_risk = UserHealthItem(school=user.school)
    if not record.get('report'):
        # No daily report filed for today -> health status is incomplete.
        health_risk.set_incomplete()
    else:
        health_risk.from_health_report(HealthReport(**record['report']))
        if record['member']['vaccinated'] == VaccinationStatus.VACCINATED:
            # Schools may be configured to ignore vaccination when coloring risk.
            health_risk.add_vaccination(VaccinationStatus.VACCINATED, update_color=not SymptomConfigRetriever.get(user.school)['ignore_vaccine'])
    identified_item_params = dict(name=f"{record['member']['first_name']} {record['member']['last_name']}")
    # A location block takes precedence over health; otherwise an incomplete
    # or at-risk (warning-level included) health status blocks entry.
    if location_risk.entry_blocked():
        identified_item_params.update(dict(entry=False, reason=EntryReason.LOCATION))
    elif health_risk.is_incomplete() or health_risk.at_risk(include_warning=True):
        identified_item_params.update(dict(entry=False, reason=EntryReason.HEALTH))
    else:
        identified_item_params.update(dict(entry=True, reason=EntryReason.HEALTH))
    return IdentifiedUserEntryItem(**identified_item_params).set_reports(health=health_risk, location=location_risk)
Morphometric analysis of various measurements of malleus on the basis of sexual dimorphism The malleus is the largest of the ear ossicles. In the present study, we tried to determine the normal range of the values of various measurements of malleus and whether these measurements are useful for the sexual dimorphism or not. Out of total 60 malleus used in the present study, 30 were retrieved from the male cadavers and 30 were retrieved from the female cadavers. Mean of total length of malleus, mean of length of manubrium of malleus and mean of length of head and neck of malleus in male are more as compared to female in the present study. Mean of index of malleus in male is less as compared to female in the present study. Among all the measurements taken in the present study; total length of malleus having statistically significant difference between the male and female malleus and it can be used for the sexual dimorphism of malleus for Kachchhi Gujarati population of present study. Introduction The malleus is the largest of the ossicles, and is shaped somewhat like a mallet. It is 8-9 mm long and has a head, neck, handle (manubrium) and anterior and lateral processes. The head is the large upper end of the bone and is situated in the epitympanic recess. It is ovoid in shape, articulates posteriorly with the incus, and is covered elsewhere by mucosa. The cartilaginous articular facet for the incus is narrowed near its middle and consists of a larger upper part and a smaller lower part, orientated almost at right angles to each other. Opposite the constriction, the lower margin of the facet projects in the form of a process, the spur of the malleus. The neck is the narrowed part below the head, and inferior to this is an enlargement from which the anterior and lateral processes project. The handle of the malleus is connected by its lateral margin to the tympanic membrane. It is directed downwards, medially and backwards. 
It decreases in size towards its free end, which is curved slightly forwards and is flattened transversely. Near the upper end of its medial surface there is a slight projection to which the tendon of tensor tympani is attached. The anterior process is a delicate bony spicule, directed forwards from the enlargement below the neck, and connected to the petrotympanic fissure by ligamentous fibres. In fetal life it is the longest process of the malleus and is continuous in front with Meckel's cartilage. The lateral process is a conical projection from the root of the handle of the malleus. It is directed laterally and is attached to the upper part of the tympanic membrane and, via the anterior and posterior malleolar folds, to the sides of the notch in the upper part of the tympanic sulcus. The cartilaginous precursor of the malleus originates as part of the dorsal end of Meckel's cartilage. With the exception of its anterior process, the malleus ossifies from a single endochondral centre which appears near the future neck of the bone in the fourth month in utero. The anterior process ossifies separately in dense connective tissue and joins the rest of the bone at about the sixth month of fetal life. In the present study, we tried to determine the normal range of the values of various measurements of malleus and whether these measurements are useful for the sexual dimorphism or not. Materials and Methods In the present study, 60 dry adult malleus bones were used. These bones were retrieved during the routine dissection of cadavers donated to the Department of Anatomy, Gujarat Adani Institute of Medical Sciences, Bhuj, Gujarat, India. Out of total 60 malleus used, 30 were retrieved from the male cadavers and 30 were retrieved from the female cadavers. Pathological, fractured or malleus of unknown sex were excluded from the study. Only fully ossified malleus of known sex were included in the study. 
The study was done during the year 2017-18 after the prior approval of the institutional ethics committee of Gujarat Adani Institute of Medical Sciences, Bhuj, Gujarat, India. Following measurements of the malleus were taken by using digital vernier caliper: 1. Total length of malleus Total length of malleus was measured as the maximum straight distance between the top of the head and the end of the manubrium of the malleus. Length of manubrium of malleus was measured as the maximum straight distance between the end of the lateral process and the end of the manubrium of the malleus. Length of head and neck of malleus was measured as the maximum straight distance between the top of the head and the end of the lateral process of the malleus. Fig. 3: Showing the measurement of length of head and neck of malleus To avoid intra-observer variation, each measurement was taken at three different times and the mean of all three readings was taken as the final reading. Above mentioned measurements were further used to calculate the following index: 1. Index of malleus Index of malleus was calculated by using the following formula: Index of malleus = length of manubrium of malleus/ total length of malleus x 100 For each of this measurement, mean value, standard deviation, standard error of mean, range (minimummaximum), mean ± SD (Standard Deviation) were calculated. Independent sample t test was applied and p value was calculated at 95% confidence interval by using SPSS (Statistical Package for the Social Sciences) software for the comparison of various parameters and index of malleus in male and female. As shown in the table 1, in the present study, mean of total length of malleus in male is 7.8847 mm, SD is 0.4451 mm, range from 6.97 mm to 8.70 mm and mean±SD from 7.4396 mm to 8.3298 mm. Mean of total length of malleus in female is 7.6280 mm, SD is 0.4801 mm, range from 6.88 mm to 8.38 mm and mean±SD from 7.1479 mm to 8.1081 mm. 
Results and Discussion Mean of length of manubrium of malleus in male is 4.5943 mm, SD is 0.4514 mm, range from 3.96 mm to 5.94 mm and mean±SD from 4.1429 mm to 5.0457 mm. Mean of length of manubrium of malleus in female is 4.4787 mm, SD is 0.3667 mm, range from 3.56 mm to 5.12 mm and mean±SD from 4.1120 mm to 4.8454 mm. Mean of length of head and neck of malleus in male is 5.0627 mm, SD is 0.3830 mm, range from 4.16 mm to 5. Mean of total length of malleus, mean of length of manubrium of malleus and mean of length of head and neck of malleus in male are more as compared to female in the present study. Mean of index of malleus in male is less as compared to female in the present study. As shown in table 2, p value for the total length of malleus (for both-with equal variances assumed as well as equal variances not assumed) is 0.036, which suggest statistically significant difference between the male and female malleus for the total length of malleus. P values for other parameters are more than 0.05, which suggest that there is no statistically significant difference for these parameters of malleus between male and female. Graph 4: Showing the distribution of observations of index of malleus in male and female (with 95% confidence interval for mean) as well as line joining the mean of index of malleus in male and mean of index of malleus in female. (IM: Index of Malleus) Arensburg et al in 1981 measured the total length, length of manubrium and index of malleus in Indian population (sample size 31). In that study, mean of total length of malleus was 7.8 mm with SD 0.35 mm, mean of length of manubrium of malleus was 4.4 mm with SD 0.47 mm and mean of index of malleus was 56.6. Oschman Z and Meiring JH in 1991 measured the total length and length of manubrium of malleus in South African population (sample size 122). In that study, mean of total length of malleus was 7.844 mm and mean of length of manubrium of malleus was 4.399. 
Bhatnagar DP et al in 2001 measured the total length and length of manubrium of malleus in population of Patiala (Punjab, India) (sample size 60). In that study, mean of total length of malleus was 8.36 mm with SD 1.39 mm and mean of length of manubrium of malleus was 4.65 mm with SD 0.27 mm. Unur E et al in 2002 measured the total length, length of manubrium, length of head and neck and index of malleus in Turkish population (sample size 40). In that study, mean of total length of malleus was 7.69 mm with SD 0.6 mm, mean of length of manubrium of malleus was 4.7 mm with SD 0.45 mm, mean of length of head and neck of malleus was 4.85 mm with SD 0.29 mm and mean of index of malleus was 60.97 with SD 3.77. Singh Nadeem G in 2012-13 measured the total length and length of manubrium of malleus in Indian population (sample size 30). In that study, mean of total length of malleus was 8 mm with SD 0.046 mm and mean of length of manubrium of malleus was 4.58 mm with SD 0.015 mm. Ramirez LM and Ballesteros LE in 2013 measured the total length and length of manubrium of malleus in Columbian population (sample size 23). In that study, mean of total length of malleus was 8.18 mm with SD 0.24 mm and mean of length of manubrium of malleus was 4.91 mm with SD 0.25 mm. Mogra K et al in 2014 measured the total length, length of manubrium, length of head and neck and index of malleus in population of Rajasthan, India (sample size: right 33, left 33). In that study, mean of total length of malleus was 8.53 mm with SD 0.58 mm, of right side was 8.515 mm with SD 0.6553 mm and of left side was 8.545 mm with SD 0.5056 mm. In that study, mean of length of manubrium of malleus was 5.2 mm with SD 0.48 mm, of right side was 5.106 mm with SD 0.4636 mm and of left side was 5.303 mm with SD 0.4831 mm. In that study, mean of length of head and neck of malleus was 4.72 mm with SD 0.82 mm, of right side was 4.606 mm with SD 0.8078 mm and of left side was 4.833 mm with SD 0.826 mm. 
In that study, mean of index of malleus was 61.01 with SD 3.74, of right side was 60.18 with SD 3.555 and of left side was 61.84 with SD 3.788. Vinayachandra PH et al in 2014 measured the total length of malleus in South Indian population (sample size 50). In that study, mean of total length of malleus was 7.45 mm with SD 0.39 mm. Rathava J et al in 2015 measured the total length, length of manubrium and length of head and neck of malleus in population of Gujarat, India (sample size 60). In that study, mean of total length of malleus was 7.81 mm with SD 0.32 mm, mean of length of manubrium of malleus was 4.59 mm with SD 0.34 mm and mean of length of head and neck of malleus was 5 mm with SD 0.2 mm. K Radha in 2016 measured the total length and length of manubrium of malleus in South Indian population (sample size 25). In that study, mean of total length of malleus was 7.4 mm and mean of length of manubrium of malleus was 4.2 mm. Sodhi S et al in 2017 measured the total length, length of manubrium, length of head and neck and index of malleus in North Indian population (sample size: right 50, left 50). In that study, mean of total length of malleus was 7.83 mm, of right side was 7.87 mm with SD 0.37 mm and of left side was 7.8 mm with SD 0.54 mm. In that study, mean of length of manubrium of malleus was 4.44 mm, of right side was 4.47 mm with SD 0.41 mm and of left side was 4.42 mm with SD 0.42 mm. In that study, mean of length of head and neck of malleus was 4.68 mm, of right side was 4.7 mm with SD 0.43 mm and of left side was 4.68 mm with SD 0.41 mm. In that study, mean of index of malleus was 56.77, of right side was 56.77 and of left side was 56.78. In the present study, we measured the total length, length of manubrium, length of head and neck and index of malleus in population of Kachchh, Gujarat, India. In this study, mean of total length of malleus in male is 7.8847 mm with SD 0.4451 and in female it is 7.628 mm with SD 0.4801 mm. 
Mean of length of manubrium of malleus in male is 4.5943 mm with SD 0.4514 mm and in female it is 4.4787 mm with SD 0.3667 mm. Mean of length of head and neck of malleus in male is 5.0627 mm with SD 0.383 mm and in female it is 4.949 mm with SD 0.2489 mm. Mean of index of malleus in male is 58.2517 with SD 4.3775 and in female it is 58.786 with SD 4.4245. Conclusion We determined the normal range of the values of various measurements and index of malleus in the Kachchhi Gujarati population of present study. Among all the measurements taken in the present study; total length of malleus having statistically significant difference between the male and female malleus and it can be used for the sexual dimorphism of malleus for Kachchhi Gujarati population of present study. Conflict of Interest: Nil.
// GetNonDefaultMounts returns a mount set from the provided mount list without default Moby mounts that it may contain func GetNonDefaultMounts(mountList []specs.Mount) []specs.Mount { defaultMountList := osdefs.DefaultMobyAllowedMounts nonDefaultMounts := []specs.Mount{} for _, mount := range mountList { found := false for _, defaultMount := range defaultMountList { if reflect.DeepEqual(defaultMount, mount) { found = true break } } if !found { nonDefaultMounts = append(nonDefaultMounts, mount) } } return nonDefaultMounts }
Study on Ultimate Load-Carrying Capacity of Long-Span Composite Girder Cable-Stayed Bridge with Three Pylons Taking a long-span composite girder cable-stayed bridge with three pylons under construction as the object of research, this paper establishes a three-dimensional finite element model of a bridge considering the geometric nonlinearity, material nonlinearity and interface slip effect in composite girder, and analyzes the failure loads and failure modes of the structure at bearing capacity limited state. The results show that the ultimate load-carrying capacity is high at bearing capacity limited state, load case whose live load acts on one main span is more unfavorable, and according to the structural failure modes, increasing the ability of the middle pylon to resist bending moment can improve the ultimate load-carrying capacity of the whole bridge quickly.
<reponame>prosolotechnologies/prosolo
package org.prosolo.common.email.generators;

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.StringWriter;

import com.github.mustachejava.DefaultMustacheFactory;
import com.github.mustachejava.Mustache;
import com.github.mustachejava.MustacheFactory;

/**
 * Helper for rendering Mustache email templates loaded from the classpath.
 *
 * @author <NAME>
 * @version 0.5
 *
 */
public class MoustacheUtil {

	/**
	 * Compiles the given classpath template and renders it with the supplied data.
	 *
	 * @param templateFile classpath location of the template resource
	 * @param templateName logical name handed to the Mustache compiler
	 * @param data view object whose properties fill the template placeholders
	 * @return the rendered template as a string
	 * @throws IOException if the resource is missing or rendering fails
	 */
	public static String compileTemplate(String templateFile, String templateName, Object data) throws IOException {
		MustacheFactory mf = new DefaultMustacheFactory();

		InputStream is = Thread
				.currentThread()
				.getContextClassLoader()
				.getResourceAsStream(templateFile);

		// Fail with a descriptive error instead of an NPE when the template is absent.
		if (is == null) {
			throw new IOException("Template resource not found on classpath: " + templateFile);
		}

		// try-with-resources: the original never closed the stream/reader,
		// leaking a file handle on every call.
		try (BufferedReader ir = new BufferedReader(new InputStreamReader(is))) {
			Mustache mustache = mf.compile(ir, templateName);

			StringWriter outputWriter = new StringWriter();
			mustache.execute(outputWriter, data).flush();

			return outputWriter.toString();
		}
	}
}
<gh_stars>0 package cmd import ( "context" "fmt" "path/filepath" dmskube "github.com/altinn/dotnet-monitor-sidecar-cli/pkg/kubernetes" "github.com/altinn/dotnet-monitor-sidecar-cli/pkg/utils" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/clientcmd" "k8s.io/client-go/util/homedir" ) // ForwardPort forwards port 52323 from host to a pod func ForwardPort(ctx context.Context, kubeconfig string, namespace string, podname string) { if home := homedir.HomeDir(); home != "" && kubeconfig == "" { kubeconfig = filepath.Join(home, ".kube", "config") } config, err := clientcmd.BuildConfigFromFlags("", kubeconfig) if err != nil { panic(err.Error()) } if namespace == "" { namespace, err = utils.GetNamespaceFromCurrentContext() if err != nil { fmt.Printf("Error getting namespace: %v", err) return } } clientset, err := kubernetes.NewForConfig(config) if err != nil { panic(err.Error()) } h := dmskube.Helper{ Client: clientset, } h.PortForward(ctx, namespace, podname) }
<reponame>73-ch/indexnet_matting<gh_stars>0
from concurrent.futures import ThreadPoolExecutor
import time

# NOTE(review): this star import presumably provides cv2, np and
# removal_background(); it may also shadow the `time` module with a time()
# function -- otherwise the `time() * 1000.` calls below would raise
# TypeError ("'module' object is not callable"). Confirm against
# scripts/removal_background.py.
from scripts.removal_background import *

if __name__ == "__main__":
    # Open the default camera and request 1280x720 @ 60fps with H.264.
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)  # set camera frame width to 1280
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)  # set camera frame height to 720
    cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('H', '2', '6', '4'))
    cap.set(cv2.CAP_PROP_FPS, 60)

    i = 0  # frame counter (only used by the commented-out imwrite below)

    ret, frame = cap.read()

    # Plain white background image with the same shape as one camera frame.
    bg = np.full_like(frame, 255., dtype=float)
    # bg = bg.astype(float)

    # thread settings
    executor = ThreadPoolExecutor(max_workers=10)
    futures = []

    t1 = time() * 1000.  # timestamp of the previously displayed frame, in ms

    while True:
        ret, frame = cap.read()

        if not ret:
            continue

        # Submit background removal asynchronously so capture keeps pace.
        futures.append(executor.submit(removal_background, frame, bg))

        # cv2.imwrite("./examples/outs/{}.png".format(i), removed)q

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

        print("running", len(futures))

        # Harvest finished jobs; iterate in reverse so deleting by index is safe.
        # NOTE(review): `i` here shadows the outer frame counter, and
        # `not running()` also matches jobs that have not started yet, so
        # result() may block briefly on a pending future.
        for i in reversed(range(len(futures))):
            if not futures[i].running():
                removed = futures[i].result()
                removed = removed.astype(np.uint8)

                # Print the elapsed ms since the last displayed frame.
                print(time() * 1000. - t1)
                t1 = time() * 1000.

                cv2.imshow("removal_background", removed)

                del futures[i]

    executor.shutdown()
    cap.release()
    cv2.destroyAllWindows()
JOHANNESBURG – Net1 UEPS Technologies' loss of the lucrative contract to distribute billions of rand to social grants recipients and its poor financial reporting on Friday came back to bite the firm, its stock plunging 46 percent. The share price closed 46.86 percent lower at R50.54 on the JSE. The group said it was in the process of preparing restated financial statements for the year ended June 2018, due to it erroneously dotting down its purchase of its stake in Cell C as “available-for-sale” equity instrument. The firm said this resulted in fair value adjustments being recorded in other comprehensive income instead of net income. Herman Kotzé, Net1 chief executive, said on Friday that the mistake was only picked during the preparation of the results for the latest quarter. He said the restatement had no effect on the carrying value of its Cell C investment, but could not be drawn to reveal how its accounting officers did not pick up the error earlier on. “The company is currently assessing the impact of this restatement on its internal control over financial reporting… The error is being corrected by moving the $25.2 million (R361m) fair value adjustment from other comprehensive income to nett income for fiscal 2018,” Kotzé said. Net1 in August last year bought a 15 percent stake in Cell C for R2 billion. Cell C is one of the three major licensed mobile operators in South Africa with more than 15 million subscribers. The group said its audit committee has discussed the error with its auditors with Deloitte & Touche. Deloitte directed all question to Net1. “Our professional responsibilities, standards and contractual obligations with respect to client confidentiality prevent us from commenting any further…” Deloitte said at the weekend. The auditing profession has over the past year come under intense scrutiny after KPMG admitted to multiple poor standards. The near collapse of VBS bank and the near destruction of Steinhoff have also undermined the profession. 
Deloitte partners have also been accused by the industry’s regulator of having turned a blind eye to African Bank incorrectly calculating impairments in its loan book, which led to the collapse of the bank. Meanwhile, Net1, whose subsidiary Cash Paymaster Services’ (CPS) contract with South Africa Social Security Agency (Sassa) came to an end in September, reported underwhelming results for the quarter ended September. It reported revenue of $126m, down 17 percent year-over-year in dollars and fundamental earnings per share of $0.01 (R0.14), including a loss of $0.28 per share related to CPS.
<gh_stars>0 #ifndef RANKBUTTONS_H #define RANKBUTTONS_H #include <QWidget> #include <QButtonGroup> #include <QHBoxLayout> #include <model/villager/guard.h> template <class T> class RankButtons : public QWidget { private: QButtonGroup* group; public: RankButtons(QWidget* = nullptr); QButtonGroup* getGroup() const; const Guard::Rank* checked() const; QVector<const Guard::Rank*> checkedMany() const; }; template <class T> RankButtons<T>::RankButtons(QWidget* parent) : QWidget(parent), group(new QButtonGroup(this)) { QHBoxLayout* layout = new QHBoxLayout(); std::vector<const Guard::Rank*> ranks = Guard::Rank::vector(); for (unsigned int i = 0; i < ranks.size(); ++i) { T* button = new T(QString::fromStdString(ranks[i]->name())); group->addButton(button, i); layout->addWidget(button); } setLayout(layout); } template<class T> QButtonGroup* RankButtons<T>::getGroup() const { return group; } template<class T> const Guard::Rank* RankButtons<T>::checked() const { return Guard::Rank::vector()[group->checkedId()]; } template<class T> QVector<const Guard::Rank*> RankButtons<T>::checkedMany() const { QList<QAbstractButton*> buttons = group->buttons(); std::vector<const Guard::Rank*> ranks = Guard::Rank::vector(); QVector<const Guard::Rank*> vector; for (int i = 0; i < buttons.size(); ++i) { if (buttons.at(i)->isChecked()) { vector.push_back(ranks[i]); } } return vector; } #endif // RANKBUTTONS_H
<reponame>open-template-hub/countdown
// Public API barrel for the countdown library: re-exports the countdown
// component and its Angular module.
export * from './lib/component/countdown/countdown.component';
export * from './lib/countdown.module';
n = input() t = list(n) k = 0 if n.isupper(): print(n.lower()) elif t[0].islower(): for i in range(1,len(t)): if t[i].isupper(): k+=1 if k == len(t) - 1: n = n.capitalize() print(n) else: print(n)
The daily lives of adolescents with an autism spectrum disorder This study explores the daily lives, particularly discretionary time, of adolescents with an autism spectrum disorder (ASD). We describe the activities and activity partners of adolescents, the factors associated with their discretionary time use, and the impact of time use on their autism symptoms. Mothers of 103 adolescents with an ASD completed two 24-hour time diaries to describe their adolescents activity participation during the third wave of a longitudinal study. Adolescents with an ASD spent considerable time in discretionary activities, with watching television and using a computer as the most frequent activities. They most frequently spent discretionary time alone or with their mothers. They spent little time engaged in conversations or doing activities with peers. Age, gender, the presence of intellectual disability, severity of autism symptoms and maladaptive behaviors, the number of siblings, maternal education, marital status, and family income were associated with adolescent time use. Notably, greater time spent in conversation and reading predicted future decreases in severity of social impairment. The way that adolescents with an ASD spend their free time may have implications for their development and the course of their autism symptoms.
. Norfloxacin (NFLX, AM-715), a new quinolone antibiotic agent, was evaluated clinically and bacteriologically for its efficacy and safety in pediatrics by a study group organized with pediatricians from all over the country. A summary of the results of the evaluation is as follows. 1. Incidence of NFLX-resistant strains (MIC over 12.5 micrograms/ml) isolated from children with various infections was 1.6% (8/512). One resistant strain was observed among 45 isolates of Staphylococcus aureus, and none among 30 isolates of Pseudomonas aeruginosa. 2. After single oral administration of 1.5-2.9, 3.0-4.8 and 5.1-6.1 mg/kg NFLX in tablet form at fasting, mean peak values of serum concentration of 0.37, 0.56, 0.92 micrograms/ml, T1/2 of 2.5, 2.6, 2.6 hours and urinary recovery rates in 8 hours at 25.3, 25.3, 27.1% were observed, respectively. 3. Clinical effects were studied chiefly in intestinal and urinary tract infections. Among 317 patients from whom pathogens had been isolated, responses to the treatment were excellent in 187, good in 79, fair in 9, poor in 7, and unknown in 35 cases. The overall efficacy rate was 94.3% (266/282) and the efficacy rate for excellent responses was 70.3% (187/266). Among all the 406 patients treated, including those with undetermined pathogens, responses were excellent in 233, good in 106, fair in 11, poor in 11, and unknown in 45 cases. The overall efficacy rate was 93.9% (339/361). 4. Clinical effects of NFLX classified by diseases with identified pathogens were 81.8% (9/11) for acute pneumonia, 80.8% (21/26) for other respiratory infections, 95.8% (23/24) for bacillary dysentery, 98.6% (70/71) for Campylobacter enteritis, 100% (24/24) for Salmonella enteritis, 100% (6/6) for other acute enteritis and 98.1% (104/106) for urinary tract infections. Including other infections as high as 94.3% (266/282) of efficacy rate was obtained in total. 
There was no significant difference in NFLX efficacies between unidentified and identified pathogens. Thus, the total clinical efficacy rate was 93.9% (339/361). 5. The total eradication rate of 325 pathogens evaluable was 84.3%, with identical eradication rates for Gram-positive cocci (GPC) (43/51) and for Gram-negative rods (GNR) (231/274). 6. The optimal daily dose of NFLX seemed to be in a range between 6.0 and 12.0 mg/kg, and the optimal duration of treatment to be 7 days for children over 5 years old. 7. The clinical efficacy in treating P. aeruginosa infections in 12 patients was 100% (11/11) and the eradication rate was 83.3% (10/12).(ABSTRACT TRUNCATED AT 400 WORDS)
from django.urls import path, include
from rest_framework.routers import DefaultRouter

from .views import TeamViewSet, UserDetail, get_my_team, cancel_plan, add_member, upgrade_plan, get_stripe_pub_key, check_session, create_checkout_session, stripe_webhook

# DRF router generating the standard CRUD routes for teams.
router = DefaultRouter()
router.register('teams', TeamViewSet, basename='teams')

urlpatterns = [
    # Team management endpoints.
    path('teams/member/<int:pk>/', UserDetail.as_view(), name='userdetail'),
    path('teams/get_my_team/', get_my_team, name='get_my_team'),
    path('teams/add_member/', add_member, name='add_member'),
    path('teams/upgrade_plan/', upgrade_plan, name='upgrade_plan'),
    path('teams/cancel_plan/', cancel_plan, name='cancel_plan'),
    # Stripe billing endpoints.
    path('stripe/get_stripe_pub_key/', get_stripe_pub_key, name='get_stripe_pub_key'),
    path('stripe/create_checkout_session/', create_checkout_session, name='create_checkout_session'),
    path('stripe/webhook/', stripe_webhook, name='stripe_webhook'),
    path('stripe/check_session/', check_session, name='check_session'),
    # Router URLs are included last so the explicit paths above match first.
    path('', include(router.urls)),
]
<reponame>Shufang-Zhu/GFSynth
#ifndef PLAYER_H
#define PLAYER_H

namespace Syft {

// Identifies one of the two parties in the synthesis setting: the agent
// (the system being synthesized) or its environment.
enum class Player { Agent, Environment };

}

#endif // PLAYER_H
/** * Slice which finds repository by path. * @since 0.9 */ final class SliceByPath implements Slice { /** * HTTP client. */ private final ClientSlices http; /** * Artipie settings. */ private final Settings settings; /** * New slice from settings. * * @param http HTTP client * @param settings Artipie settings */ SliceByPath(final ClientSlices http, final Settings settings) { this.http = http; this.settings = settings; } // @checkstyle ReturnCountCheck (20 lines) @Override @SuppressWarnings("PMD.OnlyOneReturn") public Response response(final String line, final Iterable<Map.Entry<String, String>> headers, final Publisher<ByteBuffer> body) { final Optional<Key> key = this.settings.layout().keyFromPath( new RequestLineFrom(line).uri().getPath() ); if (key.isEmpty()) { return new RsWithBody( new RsWithStatus(RsStatus.NOT_FOUND), "Failed to find a repository", StandardCharsets.UTF_8 ); } return new ArtipieRepositories(this.http, this.settings).slice(key.get(), false) .response(line, headers, body); } }
1. Field of the Invention The present invention relates to a method and apparatus for avoiding an interference in a local area wireless communication system, and more particularly, to a method and apparatus for avoiding a frequency interference in a local area wireless communication network when each device detects the frequency interference. 2. Description of the Related Art Nowadays, due to the development and convenience of wireless communication technologies such as ZigBee, various wireless communication technologies are used in more electronic devices. Unlike wired communication, an increase in the frequency of use of wireless communication may cause a radio interference between wireless communication devices. Most wireless communication technologies nearly avoid a mutual interference by dividing use frequencies. However, in the case of an ISM frequency band around 2.4 GHz, the use of which is rapidly increasing in recent years, the problem of a radio interference becomes more serious because many wireless technologies use the same frequency. Thus, it is very important to develop technologies for solving the problem of the radio interference between wireless technologies using the ISM frequency band. The problem of the radio interference between wireless technologies is treated in two directions of detecting an interference and avoiding an interference. According to subjects, interference avoiding methods may be classified into a method for detecting and avoiding an interference by a controller or a master device or each router in a wireless communication system, and a method for detecting and avoiding an interference by each device itself. In most technologies for solving a radio interference in wireless communication, a controller or a master device or each router detects and avoids an interference in a wireless communication system. 
However, there is little technology that solves the problem of a radio interference in a wireless communication system by each member device itself not by a controller or a master device or each router. Korean Patent Application Laid-open No. 2010-0048642 filed by the present applicant discloses a method for detecting an interference by each member device itself. However, there is no effective technology developed to avoid an interference when each member device itself detects a frequency interference.
/** * Dropdown view holder. If placeholder is present, then select the 1st option otherwise neglect it */ public class KmPrechatDropdownViewHolder extends RecyclerView.ViewHolder { private Spinner dropdownSpinner; public KmPrechatDropdownViewHolder(@NonNull View itemView) { super(itemView); dropdownSpinner = itemView.findViewById(R.id.prechatDropdownSpinner); dropdownSpinner.setOnItemSelectedListener(new AdapterView.OnItemSelectedListener() { @Override public void onItemSelected(AdapterView<?> adapterView, View view, int i, long l) { KmPrechatInputModel model = getItem(getAdapterPosition()); if (model != null) { if(model.getPlaceholder() != null && i > 0) { dataMap.put(model.getField(), adapterView.getItemAtPosition(i).toString()); } else if(model.getPlaceholder() == null) { dataMap.put(model.getField(), adapterView.getItemAtPosition(i).toString()); } } } @Override public void onNothingSelected(AdapterView<?> adapterView) {} }); } }
A ubiquitin-based vector for the co-ordinated synthesis of multiple proteins in plants. The genetic engineering of complex traits into crop plants will ultimately require strategies to co-express more than one protein at the same time. Here, we report the development of a ubiquitin (Ub)-based expression method that can generate two proteins from a single transcript. It contains coding regions for the proteins of interest, separated in-frame by the coding region for the C-terminal end of Ub followed by a full-length Ub. On expression in tobacco, this polycistronic messenger RNA (mRNA) is translated to produce a chimeric protein that is rapidly processed by endogenous deubiquitinating proteases to release the two proteins plus a Ub moiety in intact forms. The C-terminal protein domain is released without additional amino acids, whereas the N-terminal protein domain retains the short C-terminal end of Ub. The analysis of vectors with progressively shorter C-terminal ends indicates that only the last six C-terminal amino acids of the proximal Ub domain are needed for efficient processing in plants. By comparing the levels of luciferase and beta-glucuronidase simultaneously expressed by this method in multiple independent tobacco transformants, we synthesized consistently similar ratios of the two proteins over a wide range of protein amounts. Ub-based polyprotein vectors should facilitate the genetic engineering of crops by providing a simple method for the co-ordinated and stoichiometric synthesis of two or more proteins.
OTTAWA (Reuters) - The Canadian government, as expected, on Friday approved extradition proceedings against the chief financial officer of Huawei Technologies Co Ltd, prompting a furious reaction from China. Meng Wanzhou, the daughter of Huawei’s founder, was detained in Vancouver last December and is under house arrest. In late January the U.S. Justice Department charged Meng and Huawei with conspiring to violate U.S. sanctions on Iran. Meng is due to appear in a Vancouver court at 10 a.m. Pacific time (1800 GMT) on March 6, when a date will be set for her extradition hearing. China, whose relations with Canada have deteriorated badly over the affair, denounced the decision and repeated previous demands for Meng’s release. U.S. Justice Department spokeswoman Nicole Navas Oxman said Washington thanked the Canadian government for its assistance. “We greatly appreciate Canada’s steadfast commitment to the rule of law,” she said in a statement. Legal experts had predicted the Liberal government of Prime Minister Justin Trudeau would give the go-ahead for extradition proceedings, given the close judicial relationship between Canada and the United States. But it could be years before Meng is sent to the United States, since Canada’s slow-moving justice system allows many decisions to be appealed. Professor Wesley Wark of the University of Ottawa’s Graduate School of Public and International Affairs said “the Canadians will take a beating throughout this whole process” from China. “I suspect the Trudeau government is desperately hoping that the Americans reach a deal with the Chinese,” he said by phone. After Meng’s detention, China arrested two Canadians on national security grounds, and a Chinese court later sentenced to death a Canadian man who previously had only been jailed for drug smuggling. Brock University professor Charles Burton, a former Canadian diplomat who served two postings in China, said Beijing was likely to retaliate further. 
“They’re not going to take this lying down ... one shudders to think what the consequences could be,” he told the Canadian Broadcasting Corp, saying Beijing might crack down on Canadian canola shipments or stop Chinese students from going to Canada. Ottawa rejects Chinese calls to release Meng, saying it cannot interfere with the judiciary. “The Chinese side is utterly dissatisfied with and firmly opposes the issuance of (the) authority to proceed,” the embassy in Ottawa said in a statement. Beijing had earlier questioned the state of judicial independence in Canada, noting the government faces accusations that it tried to intervene to stop a corruption trial. Canadian Justice Minister David Lametti declined to comment. Huawei was not immediately available for comment. Meng’s lawyers said they were disappointed and described the U.S. charges as politically motivated.
The Effectiveness of Sandplay Therapy to Improve Students' Self-Esteem: A Preliminary Study in Brunei Darussalam Purpose: This research was conducted to identify the effectiveness of sandplay therapy to improve self-esteem among students. Methods: The sample was 16 students for the experimental group, who received sandplay therapy, in comparison to the 16 students from the control group who did not receive any treatment intervention. The instrument used in this research was the Self-Esteem Inventory developed by Coopersmith. Data were analyzed using SPSS version 22 with the MANOVA Repeated Measures method, using pre-test and post-test instruments. Results: The results from the multivariate Pillai's Trace test show that the main effect of the sandplay therapy at post-test is significant, F = 41.372, p < .05. The univariate tests show that there is a significant effect of the sandplay therapy on general self-esteem, F = 49.853, p < .05, social self-esteem, F = 63.646, p < .05, parental self-esteem, F = 82.924, p < .05, and academic self-esteem, F = 80.071, p < .05. Conclusions: Sandplay therapy can be used in schools to help students improve their self-esteem. One of the issues relating to adolescents is low self-esteem; therefore, applying sandplay therapy can minimize issues surrounding low self-esteem among adolescents, particularly in Brunei Darussalam. However, since there are limited studies on the usage of this kind of therapy, it is suggested that further study on the applicability of this therapy should be conducted.
// connect to kafka and twitter client and then start producing to kafka topic private void run() { logger.info("Setting up"); client = createTwitterClient(msgQueue); client.connect(); producer = createKafkaProducer(); Runtime.getRuntime().addShutdownHook(new Thread(() -> { logger.info("Application is not stopping!"); client.stop(); logger.info("Closing Producer"); producer.close(); logger.info("Finished closing"); })); while (!client.isDone()) { String msg = null; try { msg = msgQueue.poll(5, TimeUnit.SECONDS); } catch (InterruptedException e) { e.printStackTrace(); client.stop(); } if (msg != null) { logger.info(msg); producer.send(new ProducerRecord<String, String>(KafkaClientConfig.TOPIC, null, msg), new Callback() { @Override public void onCompletion(RecordMetadata recordMetadata, Exception e) { if (e != null) { logger.error("Some error OR something bad happened", e); } } }); } } logger.info("\n Application End"); }
// -*- C++ -*- // // ---------------------------------------------------------------------- // // <NAME>, U.S. Geological Survey // // This code was developed as part of the Computational Infrastructure // for Geodynamics (http://geodynamics.org). // // Copyright (c) 2010-2017 University of California, Davis // // See COPYING for license information. // // ---------------------------------------------------------------------- // /** @file tests/libtests/units/TestParser.hh * * @brief C++ TestParser object * * C++ unit testing for Parser. */ #if !defined(spatialdata_units_testparser_hh) #define spatialdata_units_testparser_hh #include <cppunit/extensions/HelperMacros.h> /// Namespace for spatialdata package namespace spatialdata { namespace units { class TestParser; } // units } // spatialdata /// C++ unit testing for Parser class spatialdata::units::TestParser : public CppUnit::TestFixture { // class TestParser // CPPUNIT TEST SUITE ///////////////////////////////////////////////// CPPUNIT_TEST_SUITE( TestParser ); CPPUNIT_TEST( testConstructor ); CPPUNIT_TEST( testLength ); CPPUNIT_TEST( testTime ); CPPUNIT_TEST( testVelocity ); CPPUNIT_TEST( testDensity ); CPPUNIT_TEST( testPressure ); CPPUNIT_TEST( testError ); CPPUNIT_TEST_SUITE_END(); // PUBLIC METHODS ///////////////////////////////////////////////////// public : /// Test constructor. void testConstructor(void); /// Test parse() with length scale. void testLength(void); /// Test parse() with time scale. void testTime(void); /// Test parse() with velocity scale. void testVelocity(void); /// Test parse() with density scale. void testDensity(void); /// Test parse() with pressure scale. void testPressure(void); /// Test trapping errors with parse(). void testError(void); }; // class TestParser #endif // spatialdata_units_testparser_hh // End of file
<reponame>wildbot-project/WildBot
/*
 * Copyright 2017 <NAME>. (<NAME>)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package ru.wildbot.core.api.manager;

import ru.wildbot.core.api.exception.AlreadyDisabledException;
import ru.wildbot.core.api.exception.AlreadyEnabledException;

/**
 * Contract for managers that own a Netty-backed network component which can be
 * switched on and off at runtime.
 */
public interface WildBotNettyManager {
    /** Starts this manager's Netty component. */
    void enableNetty() throws Exception;

    /** Stops this manager's Netty component. */
    void disableNetty() throws Exception;

    /**
     * Flips the Netty component to the opposite state.
     *
     * @return the state after toggling, as reported by {@link #isNettyEnabled()}
     */
    default boolean toggleNetty() throws Exception {
        if (isNettyEnabled()) disableNetty();
        else enableNetty();
        return isNettyEnabled();
    }

    /** @return whether the Netty component is currently enabled */
    boolean isNettyEnabled();

    /**
     * Fails fast when Netty is already enabled.
     * NOTE(review): despite the name, this throws when Netty IS enabled — it reads
     * as a precondition check before {@link #enableNetty()}; confirm call sites agree.
     */
    default void checkNettyEnabled() throws Exception {
        if (isNettyEnabled()) throw new AlreadyEnabledException("Netty already enabled for Manager");
    }

    /**
     * Fails fast when Netty is already disabled — the mirror-image precondition
     * check before {@link #disableNetty()}.
     */
    default void checkNettyDisabled() throws Exception {
        if (!isNettyEnabled()) throw new AlreadyDisabledException("Netty already disabled for Manager");
    }
}
<filename>node_modules/styled-icons/remix-line/Headphone/Headphone.ts
// Barrel module: re-exports everything the scoped @styled-icons package
// provides for the Headphone icon, so it can be imported via 'styled-icons'.
export * from '@styled-icons/remix-line/Headphone'
Shortly before sitting down with senior Samsung executives, prolific industry insider Ice Universe revealed it is “a high probability” that the company will increase the camera sensor size on the Galaxy Note 10. This is big news as it not only opens up new possibilities, it would be the first time Samsung has boosted the megapixels of a flagship's primary camera since the Galaxy S4 in 2013. Needless to say, you’d be absolutely right at this point to jump in and say: more megapixels does not necessarily equate to better image quality. And you’d be right if this were not 2019. Today more megapixels, working in conjunction with advanced stabilisation and image processing, can deliver more detail which in turn enables images to be cropped while retaining greater image quality. Nokia’s famous Lumia 1020 was ahead of its time here with a 41-megapixel sensor that was so good images could be cropped to deliver results akin to an optical zoom. Meanwhile, Huawei, arguably Samsung’s biggest threat, unveiled the P30 Pro with a 40-megapixel primary sensor. Back to the future? Yes, more megapixels equals more storage space so - despite Samsung’s capacity boosts this year - not everyone will be a fan. That said, when combined with reports the Galaxy Note 10 will have a quad-core camera rather than the triple array on the Galaxy S10 and S10 Plus, next-gen storage and next-gen RAM as well as 5G as standard, the new Note looks a genuine step-up. In fact, at this point, it looks like Samsung would have to do something pretty crazy to spoil the Galaxy Note 10... Ah yes, don't look now.
/**
 * Logs a fault message through the root logger.
 *
 * @param fmsg the fault message; silently ignored when {@code null}
 */
public void logFaultMessage(String fmsg) {
    if (fmsg == null) {
        return;
    }
    CommonKnowledgeLogger.ROOT_LOGGER.formattedFaultMessage(fmsg);
}
/*
 * Check construction requirements.
 *
 * NOTE(review): this looks like a negative compile test — v2::vector<int, 2>
 * is constructed with a single argument ("not enough arguments"), so the file
 * is presumably expected to FAIL to compile. Confirm the test harness treats
 * a compilation failure of this file as a pass before "fixing" it.
 */

#include "variolite/all.hpp"

int main()
{
  using v2i = v2::vector<int, 2>;

  v2i a(1); // not enough arguments
}
Incidence and Risk Factors of the Watershed Shift Phenomenon after Superficial Temporal Artery-Middle Cerebral Artery Anastomosis for Adult Moyamoya Disease Objective: Superficial temporal artery-middle cerebral artery (STA-MCA) anastomosis is the standard surgical management for adult moyamoya disease (MMD) patients, but local cerebral hyperperfusion (CHP) and cerebral ischemia are potential complications of this procedure. Recent hemodynamic analysis of the acute stage after revascularization surgery for MMD revealed a more complex and unique pathophysiological condition, the so-called watershed shift (WS) phenomenon, which is defined as a paradoxical decrease in the cerebral blood flow (CBF) at the adjacent cortex near the site of local CHP. The objective of this study was to clarify the exact incidence, clinical presentation, and risk factors of the WS phenomenon after direct revascularization surgery for adult MMD. Patients and Methods: Among 74 patients with MMD undergoing STA-MCA anastomosis for 78 affected hemispheres, 60 adult patients comprising 64 hemispheres underwent serial quantitative CBF analysis by N-isopropyl-p- iodoamphetamine single-photon emission computed tomography after revascularization surgery. The local CBF was quantitatively measured at the site of anastomosis and the adjacent cortex before surgery, as well as on 1 and 7 days after surgery. Then, we investigated the incidence, clinical presentation, and risk factors of the WS phenomenon. Results: The WS phenomenon was evident in 7 patients (7/64 hemispheres; 10.9%) after STA-MCA anastomosis for adult MMD. None of the patients developed neurological deterioration due to the WS phenomenon, but 1 patient developed reversible ischemic change on diffusion-weighted imaging at the site of the WS phenomenon. Multivariate analysis revealed that a lower preoperative CBF value was significantly associated with the occurrence of the WS phenomenon (20.3 ± 7.70 mL/100 g/min in WS-positive group vs. 
31.7 ± 8.81 mL/100 g/min in WS-negative group, $p = 1.1 \times 10^{-2}$). Conclusions: The incidence of the WS phenomenon was as high as 10.9% after STA-MCA anastomosis for adult MMD. The clinical outcome of the WS phenomenon is generally favorable, but there is a potential risk for perioperative cerebral infarction. Thus, we recommend routine CBF measurement in the acute stage after revascularization surgery for adult MMD to avoid surgical complications, such as local CHP and cerebral ischemia, caused by the WS phenomenon. Concomitant detection of the WS phenomenon with local CHP is clinically important because blood pressure reduction to counteract local CHP may have to be avoided in the presence of the WS phenomenon.
<filename>blimp/client/app/android/java/src/org/chromium/blimp/session/BlimpClientSession.java
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package org.chromium.blimp.session;

import org.chromium.base.annotations.CalledByNative;
import org.chromium.base.annotations.JNINamespace;
import org.chromium.blimp.R;
import org.chromium.blimp.assignment.Result;

import java.util.ArrayList;
import java.util.List;

/**
 * The Java representation of a native BlimpClientSession. This is primarily used to provide access
 * to the native session methods and to facilitate passing a BlimpClientSession object between Java
 * classes with native counterparts.
 */
@JNINamespace("blimp::client")
public class BlimpClientSession {
    /**
     * An observer for when the session needs to notify the UI about the state of the Blimp session.
     */
    public interface ConnectionObserver {
        /**
         * Called when an engine assignment has been successful or failed.
         * @param result The result code of the assignment. See
         *               assignment_source.h for details. Maps to a value in
         *               {@link Result}.
         * @param suggestedMessageResourceId A suggested resource id for a string to display to the
         *                                   user if necessary.
         * @param engineInfo IP address and version of blimp engine.
         */
        void onAssignmentReceived(
                int result, int suggestedMessageResourceId, EngineInfo engineInfo);

        /**
         * Called when a connection to the engine was made successfully.
         */
        void onConnected();

        /**
         * Called when the engine connection was dropped.
         * @param reason The string-based error code.
         *               See net/base/net_errors.h for a complete list of codes
         *               and their explanations.
         */
        void onDisconnected(String reason);

        /**
         * Called to update the debug UI about network statistics for the current web page.
         * @param received Number of bytes received.
         * @param sent Number of bytes sent.
         * @param commits Number of commits completed.
         */
        void updateDebugStatsUI(int received, int sent, int commits);
    }

    // Assigner endpoint this session was created against; forwarded into EngineInfo.
    private final String mAssignerUrl;
    // Observers notified from the @CalledByNative callbacks below.
    private final List<ConnectionObserver> mObservers;
    // Owning pointer to the native BlimpClientSessionAndroid; 0 once destroy() has run.
    private long mNativeBlimpClientSessionAndroidPtr;

    public BlimpClientSession(String assignerUrl) {
        mAssignerUrl = assignerUrl;
        mObservers = new ArrayList<ConnectionObserver>();
        mNativeBlimpClientSessionAndroidPtr = nativeInit(mAssignerUrl);
    }

    /**
     * Add an observer to be notified about the connection status.
     * @param observer The observer to add.
     */
    public void addObserver(ConnectionObserver observer) {
        mObservers.add(observer);
    }

    /**
     * Remove an observer from the observer list.
     * @param observer The observer to remove.
     */
    public void removeObserver(ConnectionObserver observer) {
        mObservers.remove(observer);
    }

    /**
     * Retrieves an assignment and uses it to connect to the engine.
     * @param token A OAuth2 access token for the account requesting access.
     */
    public void connect(String token) {
        nativeConnect(mNativeBlimpClientSessionAndroidPtr, token);
    }

    /**
     * Destroys the native BlimpClientSession. This class should not be used after this is called.
     */
    public void destroy() {
        if (mNativeBlimpClientSessionAndroidPtr == 0) return;

        mObservers.clear();
        nativeDestroy(mNativeBlimpClientSessionAndroidPtr);
        mNativeBlimpClientSessionAndroidPtr = 0;
    }

    // Methods that are called by native via JNI.

    // Maps the native result code to a user-visible message resource, then fans the
    // assignment result out to all registered observers.
    @CalledByNative
    private void onAssignmentReceived(int result, String engineIP, String engineVersion) {
        if (mObservers.isEmpty()) return;

        int resultMessageResourceId = R.string.assignment_failure_unknown;
        switch (result) {
            case Result.OK:
                resultMessageResourceId = R.string.assignment_success;
                break;
            case Result.BAD_REQUEST:
                resultMessageResourceId = R.string.assignment_failure_bad_request;
                break;
            case Result.BAD_RESPONSE:
                resultMessageResourceId = R.string.assignment_failure_bad_response;
                break;
            case Result.INVALID_PROTOCOL_VERSION:
                resultMessageResourceId = R.string.assignment_failure_bad_version;
                break;
            case Result.EXPIRED_ACCESS_TOKEN:
                resultMessageResourceId = R.string.assignment_failure_expired_token;
                break;
            case Result.USER_INVALID:
                resultMessageResourceId = R.string.assignment_failure_user_invalid;
                break;
            case Result.OUT_OF_VMS:
                resultMessageResourceId = R.string.assignment_failure_out_of_vms;
                break;
            case Result.SERVER_ERROR:
                resultMessageResourceId = R.string.assignment_failure_server_error;
                break;
            case Result.SERVER_INTERRUPTED:
                resultMessageResourceId = R.string.assignment_failure_server_interrupted;
                break;
            case Result.NETWORK_FAILURE:
                resultMessageResourceId = R.string.assignment_failure_network;
                break;
            case Result.UNKNOWN:
            default:
                resultMessageResourceId = R.string.assignment_failure_unknown;
                break;
        }
        for (ConnectionObserver observer : mObservers) {
            observer.onAssignmentReceived(result, resultMessageResourceId,
                    new EngineInfo(engineIP, engineVersion, mAssignerUrl));
        }
    }

    @CalledByNative
    void onConnected() {
        for (ConnectionObserver observer : mObservers) {
            observer.onConnected();
        }
    }

    @CalledByNative
    void onDisconnected(String reason) {
        for (ConnectionObserver observer : mObservers) {
            observer.onDisconnected(reason);
        }
    }

    // Hands the native pointer back to native code; asserts the session is still alive.
    @CalledByNative
    private long getNativePtr() {
        assert mNativeBlimpClientSessionAndroidPtr != 0;
        return mNativeBlimpClientSessionAndroidPtr;
    }

    /**
     * Makes a JNI call to pull the debug statistics.
     * Returns null after destroy() has been called.
     */
    public int[] getDebugStats() {
        if (mNativeBlimpClientSessionAndroidPtr == 0) return null;
        return nativeGetDebugInfo(mNativeBlimpClientSessionAndroidPtr);
    }

    private native long nativeInit(String assignerUrl);
    private native void nativeConnect(long nativeBlimpClientSessionAndroid, String token);
    private native void nativeDestroy(long nativeBlimpClientSessionAndroid);
    private native int[] nativeGetDebugInfo(long nativeBlimpClientSessionAndroid);
}
"""User-facing log and alert message templates for the AlfMonitor service.

All strings are ``str.format`` templates; the engine, mailer, and agent
subsystems fill in the placeholders at runtime.
"""

# --- Engine / service lifecycle -------------------------------------------
SUBSYSTEM_STARTUP_MSG = 'Starting {} subsystem.'
SHUTDOWN_MSG = 'Shutdown requested by user. Shutting down.'
SVC_START_MSG = 'AlfMonitor service started at {}'
# NOTE(review): misspelled name ("ACTVE") kept for backward compatibility.
ACTVE_AGENTS_MSG = 'Active Agents {}'
INACTIVE_MAIL_MSG = (
    'Mail agent is not active and will not be run. '
    'Warning: Email alerts will not be sent.'  # fixed: was "will not sent"
)
START_AGENT_MSG = 'Starting agent: {}'
ENGINE_SLEEP_MSG = (
    'Alfmonitor engine sleeping for '
    '{} seconds until next run.'
)
SCRIPT_CALL_MSG = 'This script should only be called by engine.py.'

# --- Mailer subsystem ------------------------------------------------------
MAIL_INIT_MSG = 'Mailer subsystem initialized.'
MAIL_START_MSG = 'Mailer subsystem enabled, started and ready.'
MAIL_SEARCH_MSG = 'Searching for alarms to mail.'
ALARM_MSG = (
    'An alarm was generated: \n\n'
    '{}\n\n'
    'This event occurred at {}\n'
    'Connection time was {} ms.\n\n'
)
SEND_TO_MSG = 'Sending alert email to the following: {}'
MAIL_SLEEP_MSG = 'Mailer subsystem sleeping for {} seconds.'
DISABLED_MAIL_MSG = (
    'Mailer subsystem disabled. '
    'Mailer subsystem will not be started. '
    'Warning: emails for alarms will not be sent.'
)

# --- Agents ----------------------------------------------------------------
RUN_AGENT_MSG = 'Running {} Agent.'
NO_PROFILE_CONFIGURED_MSG = (
    'No profiles configured for {} agent. '
    'Consult documentation about profile configuration.'
)
MISSING_AGENT_MSG = "Check that the agent's name is not misspelled or missing."
Maternal morbidity and perinatal outcomes in rural versus urban areas We commend Lisonkova and colleagues for the breadth of morbidities they included in their article. However, we are disappointed and somewhat astonished over the absence of any attempt to include an analysis of a crucial variable in the discussion: distance to services. This limitation not only We commend Lisonkova and colleagues for the breadth of morbidities they included in their article. 1 However, we are disappointed and somewhat astonished over the absence of any attempt to include an analysis of a crucial variable in the discussion: distance to services. This limitation not only weakens the conclusions of the study, but also calls into question the validity of the findings. The authors note, "The limitations of our study include the lack of individual information on the time needed to travel to the nearest health care facility...." Although individual information would be ideal, all we need to know is whether birthing women have access to maternity services in their community. This lack of service-level consideration undermines the article. There is strong evidence from British Columbia and internationally that local access to maternity care is an important influence on maternal newborn outcomes. This lack of attention to distance creates a conceptual shortcoming: the rural group has been defined by its isolation from population centres (i.e., maternity health services), but distance to services (predictor of outcomes) is ignored in the cohort analysis. From previous work with an overlapping data set, we predict that of the 25 855 rural cases, between 4000 and 6000 will be from communities that are more than one hour from the nearest maternity services. This could easily account for the relatively minor differences in the odds ratios for the three principal morbidities (eclampsia, obstetric embolism and uterine scar dehiscence/rupture). 
Once distance to services is accounted for, data from BC and Canada show that women from communities without maternity services have poorer outcomes than those from communities with services. Data also show that women from communities with primary maternity care (i.e., no cesarean delivery) and communities where cesarean delivery is provided by family physicians with enhanced surgical skills have outcomes as good as those from communities with obstetricians providing care. To suggest, as the authors do in their conclusion, that in rural communities "the emphasis should remain on monitoring" for those conditions "requiring advanced obstetric and neonatal care" is not only misguided, but also impugns the excellent maternity services being provided in communities that are still offering services. We question why CMAJ published this manuscript. It is a weak cohort analysis that ignores the key health services determinants of outcomes for rural maternity care, but then makes recommendations about the organization of health services. The article presents misleading and potentially frightening data for women in rural areas who are trying to decide where they should give birth. A worthwhile adjustment to the analysis of this data would be to stratify those women according to whether they have a local maternity service in their community, and then examine morbidities. The literature has already demonstrated good outcomes for newborns. We expect that data will show the same for maternal outcomes. On behalf of the Society of Rural Physicians of Canada, we believe that the study by Lisonkova and colleagues 1 does a disservice to rural maternity providers, and fails to address the factors that most influence maternal morbidity and perinatal outcomes. 
Stefan Grzybowski MD Jude Kornelsen PhD The authors allude to the fact that closure of rural maternity units may have played a role in the outcomes in their study; however, they downplay this fact and choose to focus on the providers, suggesting that, "the emphasis should remain on monitoring for potentially life-threatening maternal and perinatal complications...." In our experience, rural maternity care practitioners would not neglect to notice when preeclampsia progressed to eclampsia, for example. With reduced access to maternity care, women in rural Canada will present later, attend less frequent appointments or even choose to avoid transfer for delivery, which results in an increased risk of complications. Further, general health care teams are less prepared than rural teams when women make these choices. We cannot ignore the need to provide local access to care. Pregnant women in rural areas tend to be younger, have higher rates of smoking or substance use, and have preexisting hypertension. When a pregnancy is labelled high risk, will a woman choose to leave her family, sometimes for weeks before delivery, and travel hundreds (or thousands) of kilometres to receive care? Will she want to deliver where the culture and language may be different, at substantial personal financial cost, and where her support people may not be present? In areas with primary maternity care (no cesarean delivery), low-volume maternity units or maternity care with family physicians with enhanced surgical skills for cesarean delivery, rural women have outcomes equal to those of their urban counterparts. As rural and urban maternity care providers, we should be advocating strongly for the strengthening of rural maternity services to improve Letters
// // A bitmap can be added in two ways: // 1. Send a bitmap in the hBMPNew field. The uiBMPType parameter needs to be a BITMAP_BMP* // The uiCount and the ptb parameters are ignored // The offset is placed in puiOffset // // 2. A TBADDBITMAP struct can be sent. The uiCount should have the count // uiBMPType parameter needs to be a BITMAP_TBA* value // The offset is placed in puiOffset HRESULT CInternetToolbar::AddBitmap(const GUID * pguidButtonGroup, UINT uiBMPType, UINT uiCount, TBADDBITMAP * ptb, LRESULT * pOffset, COLORREF rgbMask) { UINT uiGetMSG, uiSetMSG; TBBMP_LIST tbl = {NULL}; TraceMsg(DM_ITBAR, "CITBar::AddBitmaP called"); *pOffset = -1; _CreateBands(); if ((!pguidButtonGroup) || (!IsWindow(_btb._hwnd)) || !_hdsaTBBMPs) { TraceMsg(DM_ERROR, "CITBar::AddBitmaP failed"); return E_FAIL; } TBBMP_LIST * pTBBs = NULL; int nCount = DSA_GetItemCount(_hdsaTBBMPs); for (int nIndex = 0; nIndex < nCount; nIndex++) { pTBBs = (TBBMP_LIST*)DSA_GetItemPtr(_hdsaTBBMPs, nIndex); if ((pTBBs) && (pTBBs->hInst == ptb->hInst) && (pTBBs->uiResID == ptb->nID)) break; pTBBs = NULL; } if ((ptb->hInst == HINST_COMMCTRL) && (!pTBBs)) { TraceMsg(DM_ERROR, "CITBar::AddBitmap failed - bogus ResID for HINST_COMMCTL"); return E_FAIL; } if (ptb->hInst != HINST_COMMCTRL) { TCHAR szDLLFileName[MAX_PATH], *pszFN; memset(szDLLFileName, 0, ARRAYSIZE(szDLLFileName)); if (GetModuleFileName(ptb->hInst, szDLLFileName, ARRAYSIZE(szDLLFileName))) { pszFN = PathFindFileName(szDLLFileName); if(!lstrcmpi(pszFN, TEXT("fontext.dll"))) *pOffset = FONTGLYPH_OFFSET; else if (!lstrcmpi(pszFN, TEXT("shell32.dll"))) { if ((ptb->nID == 140) || (ptb->nID == 141)) *pOffset = BRIEFCASEGLYPH_OFFSET; } else if (!lstrcmpi(pszFN, TEXT("rnaui.dll"))) *pOffset = RNAUIGLYPH_OFFSET; else if (!lstrcmpi(pszFN, TEXT("webcheck.dll"))) *pOffset = WEBCHECKGLYPH_OFFSET; if (*pOffset != -1) return S_OK; } } if (!pTBBs) { tbl.hInst = ptb->hInst; tbl.uiResID = ptb->nID; nIndex = DSA_AppendItem(_hdsaTBBMPs, &tbl); if (nIndex < 0) { 
TraceMsg(DM_ERROR, "CITBar::AddBitmap failed"); return E_FAIL; } pTBBs = (TBBMP_LIST*)DSA_GetItemPtr(_hdsaTBBMPs, nIndex); if (!pTBBs) { TraceMsg(DM_ERROR, "CITBar::AddBitmap failed"); return E_FAIL; } } switch(uiBMPType) { case BITMAP_NORMAL: if ((pTBBs) && (pTBBs->fNormal)) { *pOffset = pTBBs->uiOffset; return S_OK; } else if (pTBBs) pTBBs->fNormal = TRUE; uiGetMSG = TB_GETIMAGELIST; uiSetMSG = TB_SETIMAGELIST; break; case BITMAP_HOT: if ((pTBBs) && (pTBBs->fHot)) { *pOffset = pTBBs->uiOffset; return S_OK; } else if (pTBBs) pTBBs->fHot = TRUE; uiGetMSG = TB_GETHOTIMAGELIST; uiSetMSG = TB_SETHOTIMAGELIST; break; case BITMAP_DISABLED: if ((pTBBs) && (pTBBs->fDisabled)) { *pOffset = pTBBs->uiOffset; return S_OK; } else if (pTBBs) pTBBs->fDisabled = TRUE; uiGetMSG = TB_GETDISABLEDIMAGELIST; uiSetMSG = TB_SETDISABLEDIMAGELIST; break; default: ASSERT(FALSE); return E_FAIL; } *pOffset = _AddBitmapFromForeignModule(uiGetMSG, uiSetMSG, uiCount, ptb->hInst, ptb->nID, rgbMask); if (pTBBs) pTBBs->uiOffset = (UINT)*pOffset; return S_OK; }
package src;

import java.util.HashSet;
import java.util.Set;

public class largest_consecutive_sum {

    /**
     * Returns the length of the longest run of consecutive integers
     * (e.g. 1,2,3,4) present in {@code nums}, or 0 for an empty array.
     *
     * Runs in O(n) time: each run is scanned exactly once, starting only
     * from its smallest element.
     */
    public int longestConsecutive(int[] nums) {
        Set<Integer> values = new HashSet<>();
        for (int v : nums) {
            values.add(v);
        }

        // Starting at 0 makes the empty-input special case unnecessary.
        int best = 0;
        // Iterate the de-duplicated set, not nums, so duplicate elements
        // don't trigger redundant run-start checks and rescans.
        for (int start : values) {
            // Only begin counting at the smallest element of a run.
            if (!values.contains(start - 1)) {
                int end = start;
                while (values.contains(end + 1)) {
                    end++;
                }
                best = Math.max(best, end - start + 1);
            }
        }
        return best;
    }

    public static void main(String[] args) {
        // main function
    }
}
Promoting Ginger Oleoresin Production in Nigeria for Economic Growth and Sustainable Supply to User Industries Nigeria ranks 2 nd amongst top 10 ginger producers in the world with a production of 691,239 tonnes and its ginger is among the best, with its aroma, pungency and high oil and oleoresin content as distinct features. The most important form of ginger commercially is the dried form because it can be further processed in the industry to ginger powder, ginger oil and ginger oleoresin. There is an increasing international demand for ginger oleoresins, especially for the production of alcoholic beverages, ginger ale and gingerbread. The global ginger oil market is expected to grow at a CAGR of 9.41 % from 2020 to 2025 to reach a total market size of US$189.431 million by 2025, increasing from US$110.435 million. Nigeria, with the volume of ginger production and distinctive features of its ginger, should tap into this market demand. However, ginger is exported from Nigeria in the split-dried form while value-added products such as ginger powder, essential oils and oleoresin are imported at high cost. According to the Nigeria Customs Service data, 700,891 kg of resinoids and mixtures of odoriferous substances were imported to Nigeria from 2016 to 2019 at a value of ₦1.24 billion. To take advantage of the ever expanding global oleoresin market, formation of farmers clusters/cooperatives, development of farmers-processors linkages, quality assurance through Good Agricultural Practices, technology development, Public-Private Partnerships, development of Nigeria Industrial Standards (NIS) for oleoresins and import restrictions on ginger oleoresin are recommended. There is need for synergy amongst government agencies to harmonize and integrate various development plans and strategies for ginger value chain. proper coordination is also recommended to harmonize all the activities in the sector for greater impact. 
Introduction Oleoresins are semi-solid extracts, from spices and herbs, composed of resin and essential or fatty oil. Oleoresin is oil and resin that is a mixture of essential oils as an aroma carrier and a kind of resin as a carrier of flavour (Seema, et al., 2016;Susheela, 2000). They are naturally occurring mixture of essential oil and a resin extracted from various spices with organic solvents. Oleoresins are characterised by high potency of active components which enables their usage in small dosages and they find applications in Beverages, Meat Canning, Confectionery, Sauces and Pharmaceuticals. They are also used as a base for a number of seasonings. Oleoresins are increasingly used in pharmaceutical applications owing to its antioxidant and anti-inflammatory properties. The type of solvent affects the quality and quantity of oleoresin obtained (). Oleoresins provide a number of advantages over traditional spices as flavouring agents. They represent the complete flavour and non-volatile resinous fraction present in the spices (Sharma Sharma, 2004). Important flavour compounds found in culinary herbs and other spice plants include eugenol from allspice, cinnamon, cassia, clove; piperine from black pepper; gingerol from ginger; myristicin from nutmeg; turmerone from turmeric and vanillin from vanilla. Oleoresins are 5-20 times stronger in flavour than their corresponding spices and they are commonly marketed as spice drops (Ravindran and Kallupurackal, 2012;Suderman, 2011 ). Oleoresin market by segment is shown in Table 1. Journal of Natural Sciences Research www.iiste.org ISSN 2224-3186 (Paper) ISSN 2225-0921 (Online) Vol.12, No.22, 2021 (cardamom, floral, vanilla, tamarind, nutmeg, onion, and garlic) According to MarketsandMarkets, the global oleoresins market is estimated to be valued at USD 1.2 billion in 2019 and is projected to reach USD 1.7 billion by 2025, recording a CAGR of 6.0% from 2019 to 2025. 
The rising trend of using natural flavors in processed food and an increasing number of quick service restaurants have led to a surge in demand for oleoresins. Among the different applications, the market is estimated to be dominated by the food & beverages segment from 2019 to 2025, owing to the wide usage of oleoresins that provide a natural flavor and fragrance in confectioneries and beverages. Ginger and Oleoresin 2.1 Ginger rhizome Ginger has a slightly biting and hot note; its aroma is rich, sweet, warm with a distinctive woody olfactory note. Nigeria is the third-largest producer of ginger in the world (after India and China). The estimated global production shares for the top three ginger producing countries in the world are 35 %, 18 % and 11.5 % for India, China and Nigeria respectively. According to FAOSTAT, top ginger exporters in 2018 were China, Thailand, Netherlands, Peru and Nigeria. China had 55 % market share worth USD 525 million, Thailand had 8.95 % market share worth USD 83.8 million, Netherlands had 7.7 % market share worth USD 72.8 million, Peru had 4.95 % market share worth USD 46.6 million and Nigeria had 3.98 % market share worth USD 37.4 million. Also, top ginger importers in 2018 were the United States of America (USD 114 million), Pakistan (USD 107 million), Japan (USD 79.2 million) and Germany (USD 55.7 million). Other major importing countries are the United Kingdom, Saudi Arabia, Singapore, Malaysia, Korea, the Netherlands, Canada and France. Worldwide ginger production in 2019 reached 4 million tonnes with a planted area of 385,000 hectares. The Asian continent topped the chart with production of 3.2 million tonnes, followed by Africa with about 811,000 tonnes, America with 37,000 tonnes and Oceania with about 9,000 tonnes. 
Global Ginger Market is expected to reach US$ 8.46 Billion by the end of the year 2027 from US$ 6.82 Billion in 2020, growing with a CAGR of 3.13% from 2021 to 2027 (Research and Markets, 2021). According to FAOSTAT, Nigeria ranked 2 nd amongst the top 10 ginger producers in the world with a production of 691,239 tonnes (Table 2). Vol.12, No.22, 2021 preserved ginger and dried (split and whole). The most important form of ginger commercially is the dried one because it can be further processed in the industry to ginger powder, ginger oil and ginger oleoresin. For a good yield of oleoresin and ginger oil, ginger should be harvested at 7 -8 months after planting (Kausha, et al., 2017). Ginger based products have wide range applications in many industries like food and beverage, pharmaceutical and cosmetics, confectionery, tobacco processing, etc. There is also increasing demand for ginger tea and ginger health drinks. Nigeria's ginger is among the best in the world, with its aroma, pungency and high oil and oleoresin content as distinct features (). The main producing zone is Kaduna state, and to a lesser extent Nasarawa, Niger, Gombe, Bauchi, and Benue. Ginger Oleoresin Ginger Oleoresin is a dark red-brown, viscous material with a natural, unique ginger aroma. Commercial ginger oleoresin usually has a volatile oil content of 25-30 % and replacement strength of 1 kg oleoresin for 28 kg good-quality ground spice. There is an increasing international demand for ginger oleoresins, especially for the production of alcoholic beverages, ginger ale and gingerbread. It is also used in sweets, curry powders, soft drinks, sauces and it is in huge demand in the Ayurvedic, nutraceutical and pharmaceutical industries. The global ginger oil market is expected to grow at a CAGR of 9.41 % from 2020 to 2025 to reach a total market size of US$189.431 million by 2025, increasing from US$110.435 million (Research and Markets, 2020). 
Nigeria, with the volume of ginger production and distinctive features of its ginger, could tap into this global market demand. However, the level of value addition to ginger in Nigeria is very low. Ginger is exported from Nigeria in the split-dried form while value-added products such as ginger powder, essential oils and oleoresin are imported at high cost. According to Nigeria Customs Service data, 700,891 kg of resinoids and mixtures of odoriferous substances were imported to Nigeria from 2016 to 2019 at a value of 1.24 billion Naira ( Table 3). Source: Nigeria Customs Service, 2020 Import volume and value of Resinoids and Mixtures of odoriferous substances by year are shown in Figures 1 and 2 -Also, ginger splitting machine and ginger drying equipment were designed and developed in collaboration with the National Root Crops Research Institute (NRCRI), Umudike, Abia State and Farm Industries, Owerri, Imo State. The machines were deployed to Ginger Growers Association in Mbaitoli Local Government Area of Imo Sate for commercial ginger processing. Improved varieties of ginger rhizomes (UG2) have been distributed to Ginger Farmers Association in selected States of Nigeria to promote cultivation and increase yield. CONCLUSION AND RECOMMENDATIONS 4.1 Recommendations The demand for ginger oleoresin in Nigeria is on the rise from both food and Pharmaceutical industries but current demand is not being met locally, leading to import dependence. Nigeria's ginger is among the best in the world, with its aroma, pungency and high oil and oleoresin content as distinct features. Therefore, the country has huge potentials for oleoresin production and marketing. To take advantage of the ever expanding global oleoresin market, the following strategies are recommended. 
-Formation of Clusters/Cooperatives: Farmers should be encouraged to form clusters/cooperatives for easier access to funds, improved crops varieties and other farm inputs from the government and development agencies. - Farmers-Processors Linkages: There should be linkages between farmers and processors for the offtake of the commodity at agreed price to mitigate harvest risk. The overall goal is to get more farmers involved in the cultivation of ginger for sustainable supply of raw materials to existing and emerging oleoresin extraction plants. -Tariff: Oleoresin import should be discouraged through tariff and other measures to encourage local production. Conclusion The global Ginger Oleoresin market is anticipated to rise at a considerable rate between 2021 and 2026. Despite the fact that Nigeria has competitive advantage in the production of ginger for oleoresin extraction, the country still depends on importation of oleoresin for industrial use. Therefore, there is need for synergy amongst government agencies to harmonize and integrate various development plans and strategies for the growth of the sector. An institutional framework should be established for proper coordination. Also, the right incentives for investment in the oleoresin sector should be created while the existing ones should be implemented judiciously to attract investors to the sector. The production of ginger oleoresin in Nigeria would undoubtedly create more value for the commodity in the industry and enhance its economic potentials.
Fiber intakes and anthropometric measures are predictors of circulating hormone, triglyceride, and cholesterol concentrations in the women's health trial. The unhealthy eating patterns and obesity among women in the U.S. are indicated by changes in biomarkers, such as insulin, lipoproteins, and estradiol, that are risk factors for breast cancer and cardiovascular diseases. This article models the inter-relations among diet, serum insulin, estradiol, and sex hormone binding globulin (SHBG) concentrations, plasma LDL and HDL cholesterol, and net triglyceride concentrations, using the data at baseline and 12 mo on 379 and 615 postmenopausal women in the Control and Intervention groups, respectively, of the Women's Health Trial: Feasibility Study in Minority Populations. Subjects in the Intervention group received detailed advice over a period of 1 y for reducing fat intakes and increasing the consumption of whole grains and fruits and vegetables. The main findings were that there were significant differences between the Control and Intervention groups in the changes from baseline to 12 mo in LDL and HDL cholesterol and SHBG concentrations. Second, using a comprehensive random effects modeling framework, the ratio of fiber to energy intake was significantly associated (P < 0.05) with lower insulin and triglyceride levels, and with a higher HDL cholesterol concentration in the Intervention group. Third, the subjects' waist-to-hip ratio and BMI were significantly associated with insulin, SHBG, LDL and HDL cholesterol, and triglyceride concentrations. Fourth, insulin levels were significantly negatively associated with SHBG and HDL cholesterol, and positively associated with LDL cholesterol, triglyceride, and estradiol concentrations. Overall, weight loss, especially around the waist, and increased fiber intakes are likely to be beneficial for lipid, cholesterol, and hormone profiles of U.S. women.
//
//  dcViewController.h
//  lol
//
//  Created by xuty on 10/17/2016.
//  Copyright (c) 2016 xuty. All rights reserved.
//

@import UIKit;

// View controller declared by this header. No additional properties or
// methods are exposed here; all behavior is inherited from UIViewController
// (any customization would live in the corresponding .m file).
@interface dcViewController : UIViewController

@end
// NewClient returns a new client with the provided config // // nick is the username // pass is the auth pass which starts with oauth: func NewClient(nick string, pass string, conf *Config) *Client { return &Client{ nick: nick, pass: pass, config: conf, } }
Redundant Encoding Strengthens Segmentation and Grouping in Visual Displays of Data The availability and importance of data are accelerating, and our visual system is a critical tool for understanding it. The research field of data visualization seeks design guidelinesoften inspired by perceptual psychologyfor more efficient visual data analysis. We evaluated a common guideline: When presenting multiple sets of values to a viewer, those sets should be distinguished not just by a single feature, such as color, but redundantly by multiple features, such as color and shape. Despite the broad use of this practice across maps and graphs, it may carry costs, and there is no direct evidence for a benefit. We show that this practice can indeed yield a large benefit for rapidly segmenting objects within a dense display (Experiments 1 and 2), and strengthening visual grouping of display elements (Experiment 3). We predict situations where this benefit might be present, and discuss implications for models of attentional control.
Showtime has announced the premiere dates for its Sunday night dramas, with Homeland and The Affair once again sharing “terse conversations, leading to horrible disaster” duties when they return on October 4. Homeland will be coming back for its fifth season, which is expected to see a no-longer-CIA-affiliated Carrie Mathison (Claire Danes) working in Germany as a member of a private security firm. The show, which picked up five Emmy nominations this year, including Outstanding Drama Series and a Best Lead Actress In A Drama nod for Danes, will air at 9 p.m., leading in to The Affair at 10. It’s not clear what sophomore-season tricks the latter show—which made its way onto our Best TV of 2014 list on the strength of its cast and an engaging shifting-perspectives conceit—will be pulling when it returns. But audiences can probably rest safe in the knowledge that whatever happens between Dominic West and Ruth Wilson’s mutually dallying protagonists, it’ll involve infidelity, emotional entanglement, and a healthy dose of he-said/she-said camera work to keep things nice and ambiguous for the foreseeable future.
Posti, the country’s mail service, will start offering the service next month in an effort to raise money. These days, the snail-mail business is, gently put, not very lucrative. In a world of email and text messaging, and an enormous array of mobile-communication apps, national mail providers have had to get creative to make money as mail volume shrinks and commercial delivery companies beat them to the front door. For Finland, one new strategy involves delivering something other than mail. Starting next month, Posti, the country’s postal service, will start mowing its customers’ lawns. “We believe many customers will be happy to outsource lawn mowing when we make it convenient for them to do so,” said Anu Punola, the director at Posti, in a statement announcing the pilot program last week. Postal workers will mow Finns’ lawns on Tuesdays “due to the lower volume of advertisements and publications distributed on that day” between mid-May and August this year. Customers can order the service online and must provide their own lawnmower. Thirty-minute lawn-mowing sessions cost 65 euros, or about $74, per month, and 60-minute sessions are 130 euros, or about $147, per month. The icing on the cake: It’s tax-deductible. Punola said the idea for the service came from postal workers themselves, who sound like they don’t have as much to do now that fewer people are using direct mail. Posti, like mail providers in other countries, is losing money each year; last year, the agency reported losses of 75 million euros, or about $85 million. The advent of digital communication, it said, has now driven overall delivery volumes to levels seen in the 1960s. In India, the national postal service launched a program last August that would help farmers sell their goods. India Post dispatched workers to villages to get information about produce that farmers want to sell and then upload it online for traders to see. 
The postal service would then charge the buyers a fee to deliver the purchases. In the United States, the national postal service partnered with Amazon.com to deliver groceries in 2014. Earlier this year, the U.S. Post Office reported its ninth consecutive billion-dollar loss—$5.1 billion in 2015. This month, the price of U.S. stamps decreased from 49 cents to 47 cents as part of a deal struck with Congress that allowed the postal service to increase the price by 3 cents back in 2014 to help it boost revenue. The reduction, said Megan Brennan, the postmaster general, will lead the agency to lose approximately $2 billion.
Variation in perceptions of treatment and self-care practices in elderly with osteoarthritis: a comparison between African American and white patients. OBJECTIVE To compare elderly African American and white patients with osteoarthritis of the knee or hip with respect to their perceptions of the efficacy of traditional and complementary treatments and their self-care practices. METHODS An observational, cross-sectional study design using structured questionnaires was employed. RESULTS The sample consisted of 593 patients (44% African American and 56% white). The 2 groups were comparable with respect to age, disease severity or functional status, and comorbidities. African Americans were more likely than whites to report lower educational level and household income. African Americans were also more likely than whites to perceive various traditional and complementary care modalities as efficacious. However, they were less likely than whites to perceive joint replacement therapy as efficacious (odds ratio 0.52, 95% confidence interval 0.28-0.98). African American patients were more likely than white patients to rely on self-care measures for their arthritis. CONCLUSION African American and white patients with osteoarthritis of the knee or hip differ with respect to their perceptions of traditional and complementary treatments for arthritis and their self-care practices.
Study on the Application of Shell-Activated Carbon for the Adsorption of Dyes and Antibiotics In this study, we prepared homemade fruit shell-activated carbon (SAC) with efficient adsorption of new pollutants and used it in the removal of methylene blue dye (MB) and ofloxacin antibiotic (OFL) in water. We fitted the experimental data for MB and OFL adsorption with isothermal and kinetic models and performed extensive characterization to study the properties of SAC. We also studied the effects of solution pH, dosage amount, initial concentration, and coexisting ions on the adsorption capacity. The results show that SAC has a rich pore structure, and electrostatic interactions are its main adsorption mechanism. Adjusting the solution pH by changing the SAC dosage and removing the K+, SO42−, and Cu2+ could increase the removal of MB and OFL to 99.9% and 97.6%, respectively. In addition, the adsorption capacity of SAC for MB remained at more than 50% of the initial state after three iterations of adsorption regeneration, showing a good regeneration ability. These results show the potential of SAC in replacing conventional activated carbon to remove new pollutants.
Smart Doorplates - toward an autonomic computing The last three decades proved Moore's Law. We witnessed an exponential increase in processing power, memory capacity and communication bandwidth and we expect this increase to continue for at least another decade. The effect is a growing complexity of computer systems and the need for highly qualified administrators. The question must be posed how computer systems can be managed in future if we project the actual progression in systems intricacy. This paper focuses on autonomic computing as a potential solution. We describe how a distributed system can be built to satisfy the demands for self-configuration, self-healing, context awareness and anticipation. Furthermore we describe our application example - Smart Doorplates - and the appliance of the previously discussed demands to that system.
Combinations of the presence or absence of cerebral microbleeds and advanced white matter hyperintensity as predictors of subsequent stroke types. BACKGROUND AND PURPOSE Previous studies have shown microbleeds to be a risk factor for intracerebral hemorrhage and white matter hyperintensity (WMH) to be a risk factor for ischemic stroke. This study was performed to determine whether combinations of the presence or absence of microbleeds and advanced WMH are risk factors for subsequent recurrent stroke types. METHODS In 266 patients with stroke, microbleeds on T2*-weighted MR images were counted, and WMH on T2-weighted images was graded. Patients were divided into 4 groups by the combinations of the presence or absence of microbleeds and advanced WMH and were followed up for stroke recurrence. RESULTS During a mean follow-up period of 564.8 +/- 220.5 days, 26 patients developed recurrent strokes, including 10 intracerebral hemorrhages and 16 ischemic strokes. Patients with microbleeds without advanced WMH (n = 42) developed only intracerebral hemorrhages (n = 8), and the recurrence rate of intracerebral hemorrhage in those patients estimated by the Kaplan-Meier method was the highest in the 4 groups (14.3% in 1 year and 21.2% in 2 years). In contrast, patients with advanced WMH without microbleeds (n = 39) developed only ischemic strokes (n = 6), and the estimated recurrent rate of ischemic stroke in those patients was the highest in the 4 groups (10.5% in 1 year and 17.4% in 2 years). Cox proportional hazards regression analysis revealed that microbleeds were associated with intracerebral hemorrhage (hazard ratio , 85.626; 95% confidence interval , 6.344-1155.649) and that advanced WMH was negatively associated with intracerebral hemorrhage (HR, 0.016; 95% CI, 0.001-0.258). Advanced WMH was associated with ischemic stroke (HR, 10.659; 95% CI, 2.601-43.678). 
CONCLUSION It appears that patients at high risk of subsequent intracerebral hemorrhage or ischemic stroke can be identified by combinations of the presence or absence of microbleeds and advanced WMH.
A survey on wireless sensor networks: An application perspective Wireless sensor networks are essential for real-time exploration of data with lower latency compared to conventional wireless networks. The function of wireless networks cannot be directly compared with that of sensor networks, as it is their usage that makes the difference. In this paper, wireless sensor networks are examined in terms of their features, environments and applications. The motivation of this paper is to give an overall coverage of applications that can serve as a bridge to a new beginning.
Water relations and hydraulic control of stomatal behaviour in bell pepper plant in partial soil drying Two experiments, a split-root experiment and a root pressurizing experiment, were performed to test whether hydraulic signalling of soil drying plays a dominant role in controlling stomatal closure in herbaceous bell pepper plants. In the split-root experiment, when both root parts were dried, synchronous decreases in stomatal conductance (g s ), leaf water potential (LWP) and stem sap flow (SF stem ) were observed. The value of g s was found to be closely related to soil water potential (SWP) in both compartments. Tight relationships were observed between g, and stem sap flow under all conditions of water stress, indicating a complete stomatal adjustment of transpiration. When the half-root system has been dried to the extent that its water uptake dropped to almost zero, declines in g s of less than 20% were observed without obvious changes in LWP. The reduced plant hydraulic conductance resulting from decreased sap flow and unchanged LWP may be a hydraulic signal controlling stomatal closure; the results of root pressurizing supported this hypothesis. Both LWP and g, in water-stressed plants recovered completely within 25 min of the application of root pressurizing, and decreased significantly within 40 min after pressure release, indicating the hydraulic control of stomatal closure. Our results are in contrast to those of other studies on other herbaceous species, which suggested that chemical messengers from the roots bring about stomatal closure when plants are in water stress.
use std::cmp; use std::fs::Metadata; use std::ops::{Deref, DerefMut}; use std::path::{Path, PathBuf}; use std::time::{SystemTime, UNIX_EPOCH}; use tokio::fs::File; #[cfg(unix)] use std::os::unix::fs::MetadataExt; use async_trait::async_trait; use bitflags::bitflags; use headers::*; use mime_guess::from_path; use super::{ChunkedState, FileChunk}; use crate::http::header::{self, CONTENT_DISPOSITION, CONTENT_ENCODING}; use crate::http::{HttpRange, Request, Response, StatusCode, StatusError}; use crate::{Depot, Error, Result, Writer}; const CHUNK_SIZE: u64 = 1024 * 1024; bitflags! { pub(crate) struct Flags: u8 { const ETAG = 0b0000_0001; const LAST_MODIFIED = 0b0000_0010; const CONTENT_DISPOSITION = 0b0000_0100; } } impl Default for Flags { fn default() -> Self { Flags::all() } } /// A file with an associated name. #[derive(Debug)] pub struct NamedFile { path: PathBuf, file: File, modified: Option<SystemTime>, buffer_size: u64, metadata: Metadata, flags: Flags, content_type: mime::Mime, content_disposition: HeaderValue, content_encoding: Option<HeaderValue>, } /// Builder for build [`NamedFile`]. #[derive(Clone)] pub struct NamedFileBuilder { path: PathBuf, attached_filename: Option<String>, disposition_type: Option<String>, content_type: Option<mime::Mime>, content_encoding: Option<String>, content_disposition: Option<String>, buffer_size: Option<u64>, flags: Flags, } impl NamedFileBuilder { /// Set attached filename and returns `Self`. #[inline] pub fn with_attached_filename<T: Into<String>>(mut self, attached_filename: T) -> Self { self.attached_filename = Some(attached_filename.into()); self } /// Set disposition encoding and returns `Self`. #[inline] pub fn with_disposition_type<T: Into<String>>(mut self, disposition_type: T) -> Self { self.disposition_type = Some(disposition_type.into()); self } /// Set content type and returns `Self`. 
#[inline] pub fn with_content_type<T: Into<mime::Mime>>(mut self, content_type: T) -> Self { self.content_type = Some(content_type.into()); self } /// Set content encoding and returns `Self`. #[inline] pub fn with_content_encoding<T: Into<String>>(mut self, content_encoding: T) -> Self { self.content_encoding = Some(content_encoding.into()); self } /// Set buffer size and returns `Self`. #[inline] pub fn with_buffer_size(mut self, buffer_size: u64) -> Self { self.buffer_size = Some(buffer_size); self } /// Specifies whether to use ETag or not. /// /// Default is true. #[inline] pub fn use_etag(mut self, value: bool) -> Self { self.flags.set(Flags::ETAG, value); self } /// Build a new `NamedFile` and send it. pub async fn send(self, req: &mut Request, res: &mut Response) { if !self.path.exists() { res.set_status_error(StatusError::not_found()); } else { match self.build().await { Ok(file) => file.send(req, res).await, Err(_) => res.set_status_error(StatusError::internal_server_error()), } } } /// Build a new [`NamedFile`]. 
pub async fn build(self) -> Result<NamedFile> { let NamedFileBuilder { path, content_type, content_encoding, content_disposition, buffer_size, disposition_type, attached_filename, flags, } = self; let file = File::open(&path).await?; let content_type = content_type.unwrap_or_else(|| { let ct = from_path(&path).first_or_octet_stream(); let ftype = ct.type_(); let stype = ct.subtype(); if (ftype == mime::TEXT || stype == mime::JSON || stype == mime::JAVASCRIPT) && ct.get_param(mime::CHARSET).is_none() { //TODO: auto detect charset format!("{}; charset=utf-8", ct).parse::<mime::Mime>().unwrap_or(ct) } else { ct } }); let content_disposition = content_disposition.unwrap_or_else(|| { disposition_type.unwrap_or_else(|| { let disposition_type = if attached_filename.is_some() { "attachment" } else { match (content_type.type_(), content_type.subtype()) { (mime::IMAGE | mime::TEXT | mime::VIDEO | mime::AUDIO, _) | (_, mime::JAVASCRIPT | mime::JSON) => "inline", _ => "attachment", } }; if disposition_type == "attachment" { let filename = match attached_filename { Some(filename) => filename, None => path .file_name() .map(|filename| filename.to_string_lossy().to_string()) .unwrap_or_else(|| "file".into()), }; format!("attachment; filename={}", filename) } else { disposition_type.into() } }) }); let content_disposition = content_disposition.parse::<HeaderValue>().map_err(Error::other)?; let metadata = file.metadata().await?; let modified = metadata.modified().ok(); let content_encoding = match content_encoding { Some(content_encoding) => Some(content_encoding.parse::<HeaderValue>().map_err(Error::other)?), None => None, }; Ok(NamedFile { path, file, content_type, content_disposition, metadata, modified, content_encoding, buffer_size: buffer_size.unwrap_or(CHUNK_SIZE), flags, }) } } impl NamedFile { /// Create new [`NamedFileBuilder`]. 
#[inline] pub fn builder(path: impl Into<PathBuf>) -> NamedFileBuilder { NamedFileBuilder { path: path.into(), attached_filename: None, disposition_type: None, content_type: None, content_encoding: None, content_disposition: None, buffer_size: None, flags: Flags::default(), } } /// Attempts to open a file in read-only mode. /// /// # Examples /// /// ``` /// # use salvo_core::fs::NamedFile; /// # async fn open() { /// let file = NamedFile::open("foo.txt").await; /// # } /// ``` #[inline] pub async fn open(path: impl Into<PathBuf>) -> Result<NamedFile> { Self::builder(path).build().await } /// Attempts to send a file. If file not exists, not found error will occur. pub async fn send_file(path: impl Into<PathBuf>, req: &mut Request, res: &mut Response) { let path = path.into(); if !path.exists() { res.set_status_error(StatusError::not_found()); } else { match Self::builder(path).build().await { Ok(file) => file.send(req, res).await, Err(_) => res.set_status_error(StatusError::internal_server_error()), } } } /// Returns reference to the underlying `File` object. #[inline] pub fn file(&self) -> &File { &self.file } /// Retrieve the path of this file. #[inline] pub fn path(&self) -> &Path { self.path.as_path() } /// Get content type value. #[inline] pub fn content_type(&self) -> &mime::Mime { &self.content_type } /// Set the MIME Content-Type for serving this file. By default /// the Content-Type is inferred from the filename extension. #[inline] pub fn set_content_type(&mut self, content_type: mime::Mime) { self.content_type = content_type; } /// Get Content-Disposition value. #[inline] pub fn content_disposition(&self) -> &HeaderValue { &self.content_disposition } /// Set the `Content-Disposition` for serving this file. This allows /// changing the inline/attachment disposition as well as the filename /// sent to the peer. 
/// /// By default the disposition is `inline` for text, /// image, and video content types, and `attachment` otherwise, and /// the filename is taken from the path provided in the `open` method /// after converting it to UTF-8 using /// [to_string_lossy](https://doc.rust-lang.org/std/ffi/struct.OsStr.html#method.to_string_lossy). #[inline] pub fn set_content_disposition(&mut self, content_disposition: HeaderValue) { self.content_disposition = content_disposition; self.flags.insert(Flags::CONTENT_DISPOSITION); } /// Disable `Content-Disposition` header. /// /// By default Content-Disposition` header is enabled. #[inline] pub fn disable_content_disposition(&mut self) { self.flags.remove(Flags::CONTENT_DISPOSITION); } /// Get content encoding value reference. #[inline] pub fn content_encoding(&self) -> Option<&HeaderValue> { self.content_encoding.as_ref() } /// Set content encoding for serving this file #[inline] pub fn set_content_encoding(&mut self, content_encoding: HeaderValue) { self.content_encoding = Some(content_encoding); } /// Get ETag value. pub fn etag(&self) -> Option<ETag> { // This etag format is similar to Apache's. self.modified.as_ref().and_then(|mtime| { let ino = { #[cfg(unix)] { self.metadata.ino() } #[cfg(not(unix))] { 0 } }; let dur = mtime .duration_since(UNIX_EPOCH) .expect("modification time must be after epoch"); let etag_str = format!( "\"{:x}-{:x}-{:x}-{:x}\"", ino, self.metadata.len(), dur.as_secs(), dur.subsec_nanos() ); match etag_str.parse::<ETag>() { Ok(etag) => Some(etag), Err(e) => { tracing::error!(error = ?e, etag = %etag_str, "set file's etag failed"); None } } }) } ///Specifies whether to use ETag or not. /// ///Default is true. #[inline] pub fn use_etag(mut self, value: bool) { self.flags.set(Flags::ETAG, value); } /// GEt last_modified value. #[inline] pub fn last_modified(&self) -> Option<SystemTime> { self.modified } ///Specifies whether to use Last-Modified or not. /// ///Default is true. 
#[inline] pub fn use_last_modified(mut self, value: bool) -> Self { self.flags.set(Flags::LAST_MODIFIED, value); self } ///Consume self and send content to [`Response`]. pub async fn send(self, req: &mut Request, res: &mut Response) { let etag = if self.flags.contains(Flags::ETAG) { self.etag() } else { None }; let last_modified = if self.flags.contains(Flags::LAST_MODIFIED) { self.last_modified() } else { None }; // check preconditions let precondition_failed = if !any_match(etag.as_ref(), req) { true } else if let (Some(ref last_modified), Some(since)) = (last_modified, req.headers().typed_get::<IfUnmodifiedSince>()) { !since.precondition_passes(*last_modified) } else { false }; // check last modified let not_modified = if !none_match(etag.as_ref(), req) { true } else if req.headers().contains_key(header::IF_NONE_MATCH) { false } else if let (Some(ref last_modified), Some(since)) = (last_modified, req.headers().typed_get::<IfModifiedSince>()) { !since.is_modified(*last_modified) } else { false }; res.headers_mut() .insert(CONTENT_DISPOSITION, self.content_disposition.clone()); res.headers_mut() .typed_insert(ContentType::from(self.content_type.clone())); if let Some(lm) = last_modified { res.headers_mut().typed_insert(LastModified::from(lm)); } if let Some(etag) = self.etag() { res.headers_mut().typed_insert(etag); } res.headers_mut().typed_insert(AcceptRanges::bytes()); let mut length = self.metadata.len(); if let Some(content_encoding) = &self.content_encoding { res.headers_mut().insert(CONTENT_ENCODING, content_encoding.clone()); } let mut offset = 0; // check for range header // let mut range = None; if let Some(ranges) = req.headers().get(header::RANGE) { if let Ok(rangesheader) = ranges.to_str() { if let Ok(rangesvec) = HttpRange::parse(rangesheader, length) { length = rangesvec[0].length; offset = rangesvec[0].start; } else { res.headers_mut().typed_insert(ContentRange::unsatisfied_bytes(length)); res.set_status_code(StatusCode::RANGE_NOT_SATISFIABLE); 
return; }; } else { res.set_status_code(StatusCode::BAD_REQUEST); return; }; } if precondition_failed { res.set_status_code(StatusCode::PRECONDITION_FAILED); return; } else if not_modified { res.set_status_code(StatusCode::NOT_MODIFIED); return; } if offset != 0 || length != self.metadata.len() { res.set_status_code(StatusCode::PARTIAL_CONTENT); match ContentRange::bytes(offset..offset + length - 1, self.metadata.len()) { Ok(content_range) => { res.headers_mut().typed_insert(content_range); } Err(e) => { tracing::error!(error = ?e, "set file's content ranage failed"); } } let reader = FileChunk { offset, chunk_size: cmp::min(length, self.metadata.len()), read_size: 0, state: ChunkedState::File(Some(self.file.into_std().await)), buffer_size: self.buffer_size, }; res.headers_mut().typed_insert(ContentLength(reader.chunk_size)); res.streaming(reader).ok(); } else { res.set_status_code(StatusCode::OK); let reader = FileChunk { offset, state: ChunkedState::File(Some(self.file.into_std().await)), chunk_size: length, read_size: 0, buffer_size: self.buffer_size, }; res.headers_mut().typed_insert(ContentLength(length - offset)); res.streaming(reader).ok(); } } } #[async_trait] impl Writer for NamedFile { async fn write(mut self, req: &mut Request, _depot: &mut Depot, res: &mut Response) { self.send(req, res).await; } } impl Deref for NamedFile { type Target = File; fn deref(&self) -> &File { &self.file } } impl DerefMut for NamedFile { fn deref_mut(&mut self) -> &mut File { &mut self.file } } /// Returns true if `req` has no `If-Match` header or one which matches `etag`. fn any_match(etag: Option<&ETag>, req: &Request) -> bool { match req.headers().typed_get::<IfMatch>() { None => true, Some(if_match) => { if if_match == IfMatch::any() { true } else if let Some(etag) = etag { if_match.precondition_passes(etag) } else { false } } } } /// Returns true if `req` doesn't have an `If-None-Match` header matching `req`. 
fn none_match(etag: Option<&ETag>, req: &Request) -> bool { match req.headers().typed_get::<IfMatch>() { None => true, Some(if_match) => { if if_match == IfMatch::any() { false } else if let Some(etag) = etag { !if_match.precondition_passes(etag) } else { true } } } }
The use of measured genotype information in the analysis of quantitative phenotypes in man. II. The role of the apolipoprotein E polymorphism in determining levels, variability, and covariability of cholesterol, betalipoprotein, and triglycerides in a sample of unrelated individuals. Recent advances in molecular biology provide measures of genotypes at loci involved in lipid metabolism. Genotypes for apolipoprotein E (apo E) and quantitative levels of total plasma cholesterol, betalipoprotein, and triglycerides were measured in a sample of 223 unrelated individuals from Nancy, France. The frequencies of the epsilon 2, epsilon 3, and epsilon 4 alleles are 0.13, 0.74, and 0.13, respectively, in this sample. Significant differences among apo E genotypes were detected for these lipoprotein phenotypes. The average effect of the epsilon 2 allele was to reduce total plasma cholesterol and betalipoprotein levels by 0.52 mmol/L and 0.98, respectively, while the epsilon 4 allele raised these levels by 0.26 mmol/L and 0.61, respectively. Apo E genotype specific correlations suggest that this locus also has an effect on the coordinated metabolism between cholesterol and triglycerides. We infer that approximately 17% of the genetic variability in total plasma cholesterol may be attributable to this apo E polymorphism. No other single locus has been identified with such a large contribution to cardiovascular disease risk factors in the general population.
class NetfilterRule:
    """A single rule: a match predicate paired with the target to apply."""

    def __init__(self, match: 'NetfilterMatch', target: 'NetfilterTarget'):
        """Store the match/target pair; the owning chain is attached later."""
        self.match = match
        self.target = target
        # Back-reference to the chain this rule belongs to, set externally.
        self.chain: Optional['NetfilterChain'] = None

    def get_target_if_match(self, context: 'NetfilterContext') -> Optional['NetfilterTarget']:
        """Return this rule's target when *context* satisfies the match, else None."""
        return self.target if self.match.match(context) else None
Picking Up the Clues Abstract This article examines the experience of adults with un-diagnosed learning disabilities (ULD) and focuses on how ULD impacts development, deficits, and disavowal. The chasm, an experience of fragmentation in the face of learning failures is discussed. The emotional consequences of ULD become a counseling issue. Though many learning problems are environmentally based, many are innate, and the two types call for different treatment strategies. Self Psychology is an especially useful psychodynamic model, since many individuals with ULD seem to suffer from self problems; low self-esteem, repressed archaic grandiosity, empty depression, and tendency to extreme shame, and fragmentation states. Clearly, innate deficits influence self-states.
WASHINGTON(AFP) - The top United States negotiator in talks with Iran on curtailing its nuclear program will leave her post after a June 30 deadline for agreement, according to the New York Times. Wendy Sherman, 65, broke the news to staffers on Wednesday then left on her latest trip for talks with Iran in Switzerland, the paper said. "It's been two long years," the Times quoted Sherman as saying. Her post is under secretary of state for policy. It added that, with the departure of Sherman, all the top US officials who have negotiated with Iran over that span will have left President Barack Obama's administration. On Saturday, US Secretary of State John Kerry will once again meet with his Iranian counterpart Mohammad Javad Zarif in Geneva, after weeks of behind-the-scenes technical discussions in Vienna seeking to narrow the gaps on what would be an unprecedented deal on curtailing Iran's nuclear program. Iran and the six global powers leading the talks - Britain, China, France, Germany, Russia and the United States - laid down a framework to guide the final accord in eight days of marathon late-night talks in Lausanne in early April. Kerry and his team will now return to Europe for what is expected to be a final series of meetings with Zarif as the clock ticks down to June 30 and a possible deal putting a nuclear bomb beyond Iran's reach for longer.
JSH Guidelines for the Management of Hepatitis C Virus Infection: A 2016 update for genotype 1 and 2 THE JAPAN SOCIETY of Hepatology and Drafting Committee for Hepatitis Management Guidelines produced the first clinical practice guidelines for the management of hepatitis C virus (HCV) infection in 2012, followed by frequent updates. As English versions, we published JSH guidelines in 2013, and a 2014 update for genotype 1 in 2014. Thereafter, several interferon-free regimens with direct acting antivirals (DAAs) have been launched in the clinical setting for both genotype 1 and 2, and treatment recommendations have been greatly changed with this progress. In 2016, the Drafting Committee for Hepatitis Management Guidelines launched a 2016 update for genotype 1 and 2. These JSH guidelines are intended to support physicians and other healthcare providers in their decision making in the clinical process. The Committee definitely hopes these guidelines will help patients infected with HCV, their families and other interested individuals to overcome HCV infection and improve the outcome and quality of life with the assistance of physicians and other healthcare providers. In this updated version, we focused on newly-available IFN-free DAAs and the current treatment recommendations. Please refer to the previous versions when IFN, ribavirin, and other IFN-based DAAs (telaprevir, simeprevir) are of interest. Correspondence: Atsushi Tanaka, Department of Medicine, Teikyo University School of Medicine, 2-11-1, Kaga, Itabashi-ku, Tokyo 173-8605. 
Email: [email protected] Conflicts of Interest: Conflicts of interest of the Hepatitis C Treatment Guidelines (Third Version)Hepatitis Treatment Guideline Preparation Committee Members 1 Financial compensation (1,000,000 yen or more per year from a single company/organization) None 2 Profits from shares (1,000,000 yen or more, or 5% or more of said shares, per year from a single company)No stock owned 3 Patent use fees (1,000,000 yen or more per year for a single patent) SRL Inc. 4 Speaking fees (1,000,000 yen or more per year from a single company or organization) MSD, Dainippon Sumitomo Pharma Company, Ltd., Bristol-Myers K.K., Mitsubishi Tanabe Pharma Corporation, Toray Industries, Janssen Pharmaceutica, Chugai Pharmaceutical Co., Ltd., Daiichi Sankyo Pharmaceutical Co., Ltd., Bayer Yakuhin, Ltd. 5 Manuscript fees (1,000,000 yen or more per year from a single company or organization) None 6 Total amount of research fees, grants, etc. (2,000,000 yen or more per year total paid from affiliated departments in a single company or organization sharing research expenses (e.g., courses, fields, or laboratories)) None *Drafting Committee for Hepatitis Management Guidelines, the Japan Society of Hepatology (in alphabetical order):Yasuhiro Asahina, Department of Gastroenterology and Hepatology, School of Medicine, Tokyo Medical and Dental University; Namiki Izumi, Department of Gastroenterology and Hepatology, Musashino Red Cross Hospital; Kumada Hiromitsu, Department of Hepatology, Toranomon Hospital; Masayuki Kurosaki, Department of Gastroenterology and Hepatology, Musashino Red Cross Hospital; **Kazuhiko Koike, Department ofGastroenterology, Graduate School of Medicine, The University of Tokyo; Fumitaka Suzuki, Department of Hepatology, Toranomon Hospital; * Hajime Takikawa, Department of Medicine, Teikyo University School of Medicine; Atsushi Tanaka, Department of Medicine, Teikyo University School of Medicine; Eiji Tanaka, The Second Department of Internal Medicine, 
Shinshu University School of Medicine; Yasuhito Tanaka, Department of Virology and Liver Unit, Nagoya City University Graduate School of Medical Science; Hirohito Tsubouchi, Kagoshima City Hospital; Norio Hayashi, Kansai-Rosai Hospital; Naoki Hiramatsu, Department of Gastroenterology and Hepatology, Osaka University Graduate School of Medicine; Hiroshi Yotsuyanagi, Department of Infectious Diseases, Graduate School of Medicine, The University of Tokyo (* Committee chair; ** Special committee member) 7 Total amount of scholarship (support) payments received (2,000,000 yen or more per year paid by affiliated departments (e.g., courses, fields, laboratories) in a single company or organization sharing a scholarship budget) MSD, Mitsubishi Tanabe Pharma Corporation, Chugai Pharmaceutical Co., Ltd., Daiichi Sankyo Pharmaceutical Co., Ltd. 8 Sponsored courses provided by companies, etc. (noted when there is an affiliation with a course sponsored by a company, etc.) MSD, Dainippon Sumitomo Pharma Company, Ltd., Bristol-Myers, K.K., Toray Industries, Chugai Pharmaceutical Co., Ltd. 9 Receipt of travel expenses, gifts, etc. (50,000 yen or more per year from a single company or organization) None
<filename>tests/SampleApps/nodejs/soundcloud-ngrx/src/app/users/user-service.spec.ts import { TestBed } from '@angular/core/testing'; import { Store, StoreModule } from '@ngrx/store'; import { testUtils } from 'app/utils/test'; import { createUser } from './models'; import { initialState, usersReducer } from './state/users-reducer'; import { UserActions } from './user-actions'; import { UserService } from './user-service'; describe('users', () => { describe('UserService', () => { let service: UserService; let store: Store<any>; let userActions: UserActions; beforeEach(() => { let injector = TestBed.configureTestingModule({ imports: [ StoreModule.provideStore( { users: usersReducer }, { users: initialState .set(123, createUser(testUtils.createUser(123))) .set(456, createUser(testUtils.createUser(456))) } ) ], providers: [ UserActions, UserService ] }); service = injector.get(UserService); store = injector.get(Store); userActions = injector.get(UserActions); }); describe('currentUser$ observable', () => { it('should emit the current user from UsersState', () => { let count = 0; let user = null; service.currentUser$.subscribe(value => { count++; user = value; }); // auto-emitting initial value expect(count).toBe(1); expect(user).not.toBeDefined(); // load user store.dispatch(userActions.loadUser(123)); expect(count).toBe(2); expect(user.id).toBe(123); // loading same user should not emit store.dispatch(userActions.loadUser(123)); expect(count).toBe(2); // load different user store.dispatch(userActions.loadUser(456)); expect(count).toBe(3); expect(user.id).toBe(456); // dispatching unrelated action should not emit store.dispatch({type: 'UNDEFINED'}); expect(count).toBe(3); }); }); describe('loadResource()', () => { it('should call store.dispatch() with LOAD_USER_LIKES action if resource param is `likes`', () => { spyOn(store, 'dispatch'); service.loadResource(1, 'likes'); expect(store.dispatch).toHaveBeenCalledTimes(1); 
expect(store.dispatch).toHaveBeenCalledWith(userActions.loadUserLikes(1)); }); it('should call store.dispatch() with LOAD_USER_TRACKS action if resource param is `tracks`', () => { spyOn(store, 'dispatch'); service.loadResource(1, 'tracks'); expect(store.dispatch).toHaveBeenCalledTimes(1); expect(store.dispatch).toHaveBeenCalledWith(userActions.loadUserTracks(1)); }); }); describe('loadUserLikes()', () => { it('should call store.dispatch() with LOAD_USER_LIKES action', () => { spyOn(store, 'dispatch'); service.loadUserLikes(1); expect(store.dispatch).toHaveBeenCalledTimes(1); expect(store.dispatch).toHaveBeenCalledWith(userActions.loadUserLikes(1)); }); }); describe('loadUserTracks()', () => { it('should call store.dispatch() with LOAD_USER_TRACKS action', () => { spyOn(store, 'dispatch'); service.loadUserTracks(1); expect(store.dispatch).toHaveBeenCalledTimes(1); expect(store.dispatch).toHaveBeenCalledWith(userActions.loadUserTracks(1)); }); }); }); });
// Solution to LeetCode 870 "Advantage Shuffle": permute A so that the number
// of indices i with ans[i] > B[i] is maximized.
// FIX: removed a stray `<filename>...` dataset artifact line that preceded
// the class and made the file invalid Java.
class Solution {
    /**
     * Returns a permutation of {@code A} maximizing positions where ans[i] > B[i].
     *
     * Greedy strategy: sort A ascending and walk B's values in ascending
     * order. Each B value gets the smallest remaining A element that beats
     * it; A elements too small to beat even the smallest unprocessed B value
     * are "sacrificed" onto the indices holding the largest remaining B
     * values, where they would be wasted anyway.
     *
     * Runs in O(n log n) time and O(n) extra space.
     */
    public int[] advantageCount(int[] A, int[] B) {
        Arrays.sort(A);
        int n = A.length;
        // order[k] = {B value, original index}, sorted by B value ascending.
        int[][] order = new int[n][2];
        for (int i = 0; i < n; ++i) {
            order[i][0] = B[i];
            order[i][1] = i;
        }
        Arrays.sort(order, (a, b) -> Integer.compare(a[0], b[0]));
        int[] ans = new int[n];
        // i scans A from the smallest; hi tracks the largest-B slot still open.
        int hi = n - 1, i = 0;
        for (int[] p : order) {
            int num = p[0], idx = p[1];
            // A[i] <= num cannot beat any remaining B value; dump it on the
            // position holding the largest remaining B value.
            while (i < n && A[i] <= num) {
                ans[order[hi][1]] = A[i];
                ++i;
                --hi;
            }
            if (i < n) {
                ans[idx] = A[i]; // smallest A element that beats num
                ++i;
            } else {
                break; // all of A has been placed
            }
        }
        return ans;
    }
}
Version controlled file system made by Presslabs, to allow managed WordPress hosting customers to use at the same time Git and SFTP. gitFS is a FUSE file system that fully integrates with git. You can mount a remote repository’s branch locally, and any subsequent changes made to the files will be automatically committed to the remote. What’s its purpose? gitfs was designed to bring the full powers of git to everyone, irrespective of their experience using the tool. You can mount any repository, and all the changes you make will be automatically converted into commits. gitfs will also expose the history of the branch you’re currently working on by simulating snapshots of every commit. gitfs is useful in places where you want to keep track of all your files, but at the same time you don’t have the possibility of organizing everything into commits yourself. A FUSE file system for git repositories, with local cache. We open-sourced gitfs so that everyone can benefit from using a version-controlled environment without having to learn anything new. How the idea came up The idea of a git file system was conceived out of the need of users to make small changes to the project the developers were working on. Lacking the prior knowledge needed in order to use a version control system, these modifications were bringing nightmares to the development team and a solution was needed. Now you have the possibility of organizing everything into commits yourself with this FUSE file system for git. See how we’re using it Features Automatically commits changes : create, delete, update files and their metadata : create, delete, update files and their metadata Browse through the working index, and also see the entire commit history, as separate folders, organized by date. through the working index, and also see the entire commit history, as separate folders, organized by date. The system merges with upstream by automatically accepting local changes. 
by automatically accepting local changes. Mounts the file system as a user or a group. as a user or a group. Caching commits reduces the memory footprint and speeds up navigation. reduces the memory footprint and speeds up navigation. Reduces the number of commits by grouping pushes. Use cases Mount your web server with gitfs for easy deployment and fast rollbacks. Use gitfs on a local project to keep track of all the changes. Open an issue How to contribute Development of gitfs happens on GitHub. You are highly encouraged to contribute with code, tests, documentation, or just to share your experience. Get involved The concise contribution guide can be found in the CONTRIBUTING.md file. License This project is licensed under the Apache 2.0 license. Have a look at the LICENSE file in the top distribution directory for the complete, unabridged reference.
I recently wrote about how the exit of Mystique from the film series wouldn’t be the worst thing in the world. After touching that subject, I found myself reflecting on other core characters of this period piece trilogy. I stated that Mystique has fairly recycled sequences and now I’m finding that so does Charles Xavier. Let’s take a look. 1. X-Men- He is rendered unable to assist the team in locating Magneto and his brotherhood after he is poisoned by Mystique. He has to sit out the third act of the film due to the attack and his larger than life telepathic powers are taken out of the equation. 2. X2: X-Men United- Charles is captured by Stryker very early on in the film. His role transforms to one of being utilized to kill all of mutant kind with the aid of Cerebro. The mission of the X-Men aside from stopping Stryker is to rescue the professor… Though the context isn’t necessarily the same, the concept remains where the powers of the professor are distanced from the X-Men and their ability to rely on them. Though his victimization was different this time around, he still needed the help of his most faithful students to pull through once more. 3. X-Men the Last Stand-(Ugh). I don’t think we need to dive too deep into this one. Come on, before the show even really gets started Xavier is literally obliterated by the Phoenix. No rescue needed. He is dead as dead can be. No great telepathy on the side of the X-Men to help once more. 4. X-Men Days of Future Past- The dude wants to walk! Of course the price for that is having no telepathic powers! After all, allowing him to have them would make the conflict (and just about every other conflict in the films really) that much quicker to solve. Therefore this becomes a story of not saving Xavier from an enemy but really saving him from himself. 5. 
X-Men: Apocalypse- Looking at the clips presented to the audience at this year’s (awesome) San Diego Comic-Con it seems like we see Apocalypse and his Horsemen kidnapping the professor… Please don’t tell me we’re doing this again. It feels that the main function of the character is to render his omega level powers useless so that a plot can exist. In doing this, we get his mutant in distress dilemma where Xavier lacks any real punch as a character and serves more as a device to get the team moving for a purpose. So, for another trilogy, I see how he would have to be included-He’s professor X for crying out loud! But his use has become fairly predictable. Advertisements
It’s 2019 and the concerns at center for the Golden State Warriors are still looming. Mark Medina and Logan Murdock share their thoughts on DeMarcus Cousins, and if he really is the solution to the team’s troubles. Is Robin Lopez an option for the Dubs? all this and more on this week’s episode of the Warriors HQ podcast. 0:20 – Is DeMarcus Cousins the solution to the Warriors troubles at center? 4:10 – Are the Warriors missing Zaza? 15:45 – Can the Warriors get through with a top-heavy lineup?
<reponame>GavinRay97/teiid /* * Copyright Red Hat, Inc. and/or its affiliates * and other contributors as indicated by the @author tags and * the COPYRIGHT.txt file distributed with this work. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.teiid.core.types; import java.io.BufferedInputStream; import java.io.EOFException; import java.io.Externalizable; import java.io.IOException; import java.io.InputStream; import java.io.ObjectInput; import java.io.ObjectOutput; import java.io.OptionalDataException; import java.io.Reader; import java.nio.charset.Charset; import java.sql.SQLException; import org.teiid.core.types.InputStreamFactory.StreamFactoryReference; import org.teiid.core.util.InputStreamReader; public class BaseLob implements Externalizable, StreamFactoryReference { private static final long serialVersionUID = -1586959324208959519L; private InputStreamFactory streamFactory; private Charset charset; public BaseLob() { } protected BaseLob(InputStreamFactory streamFactory) { this.streamFactory = streamFactory; } public void setStreamFactory(InputStreamFactory streamFactory) { this.streamFactory = streamFactory; } public InputStreamFactory getStreamFactory() throws SQLException { if (this.streamFactory == null) { throw new SQLException("Already freed"); //$NON-NLS-1$ } return streamFactory; } public void setEncoding(String encoding) { if (encoding != null) { this.charset = Charset.forName(encoding); } else { this.charset = null; } } public Charset getCharset() { 
return charset; } public void setCharset(Charset charset) { this.charset = charset; } public void free() { //we don't actually free the underlying streamFactory as this could be a caching scenario this.streamFactory = null; } public Reader getCharacterStream() throws SQLException { try { Reader r = this.getStreamFactory().getCharacterStream(); if (r != null) { return r; } } catch (IOException e) { SQLException ex = new SQLException(e.getMessage()); ex.initCause(e); throw ex; } Charset cs = getCharset(); if (cs == null) { cs = Streamable.CHARSET; } return new InputStreamReader(getBinaryStream(), cs.newDecoder()); } public InputStream getBinaryStream() throws SQLException { try { return this.getStreamFactory().getInputStream(); } catch (IOException e) { SQLException ex = new SQLException(e.getMessage()); ex.initCause(e); throw ex; } } @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException { streamFactory = (InputStreamFactory)in.readObject(); try { charset = (Charset) in.readObject(); } catch (EOFException e) { //just ignore } catch (OptionalDataException e) { //just ignore } } @Override public void writeExternal(ObjectOutput out) throws IOException { out.writeObject(streamFactory); out.writeObject(charset); } /** * Returns the number of bytes. */ public long length() throws SQLException{ if (getStreamFactory().getLength() == -1) { getStreamFactory().setLength(length(getBinaryStream())); } return getStreamFactory().getLength(); } static long length(InputStream is) throws SQLException { if (!(is instanceof BufferedInputStream)) { is = new BufferedInputStream(is); } try { long length = 0; while (is.read() != -1) { length++; } return length; } catch (IOException e) { throw new SQLException(e); } finally { try { is.close(); } catch (IOException e) { } } } }
The Microsoft Technology Education and Literacy in School program will expand into more than 15 northeastern Wisconsin high schools. The Microsoft Technology Education and Literacy in School program will expand into more than 15 northeastern Wisconsin high schools for the 2018-19 academic year and is seeking IT professionals to volunteer to work directly with classroom teachers. TEALS trains industry professionals to help teachers with introductory and AP computer-science classes through co-teaching, acting as lab assistants and by providing other help. The goal of the initiative is to help high schools throughout the U.S. build and grow sustainable computer-science programs. Participating schools pay nothing but stipends to their volunteers. A public information session will be 5 to 7 p.m. Wednesday at the Girl Scouts of the Northwestern Great Lakes Council Appleton Service Center, 4693 N. Lynndale Drive. RSVP at eventbrite.com/e/teals-volunteer-info-session-appleton-tickets-45503797082. For more information, visit tealsk12.org.
Arthropathy in patients with moderate hemophilia A: a systematic review of the literature. Chronic arthropathy is a major complication in severe hemophilia A (Factor VIII < 1%). Almost all adults with severe hemophilia, who have not received prophylaxis with FVIII since their early childhood, suffer from chronic arthropathy. Patients with moderate hemophilia (FVIII activity 1-5%) usually experience fewer joint bleeds than those with severe hemophilia and are thought to rarely develop a significant degree of chronic arthropathy. However, some patients with moderate hemophilia behave like those with the severe form of the disorder, reporting several joint bleeds per year and significant joint impairment. Currently, only limited data are available about the prevalence of arthropathy, the degree of quality of life impairment, and the need for orthopedic care/aids in patients with moderate hemophilia. In this systematic review of the literature, the prevalence of overt arthropathy ranges between 15 and 77% in patients with moderate hemophilia and prophylactic replacement treatment is prescribed in approximately 30% of these patients, usually after diagnosis of clinically overt arthropathy. Moreover, because of the lack of imaging studies (magnetic resonance and/or ultrasound), the prevalence of subclinical arthropathy cannot be determined. These data confirm that severity of hemophilia should not be defined only according to FVIII levels and that a relevant proportion of patients with nonsevere hemophilia might benefit from a "tailored early prophylaxis."
(Photo by Andrew) A weekly column about how young people are totally fucked lol. Shouts to EAT, the beige-on-beige-on-beige sandwich and coffee chain. They're celebrating the new National Living Wage by no longer paying their staff during lunch breaks, it emerged this week. Doing that means they avoid paying their grasping employees £3.60 to sit on their lazy arses shovelling food into their mouths for half an hour, with the aim of topping up their energy levels so they can spend the next few hours serving customers something disconcertingly similar to breakfast on a long-haul flight. And let's hear it also for Café Nero, which blazed the trail that EAT followed. Nero is getting really into the spirit of the new wage bump by no longer offering its staff free lunch. Instead, staff now get a 65 percent discount on food, so they can spend the extra money on underwhelming paninis. "There is absolutely no limit on the amount that you can buy for personal consumption," a letter to Nero staff published by Buzzfeed News helpfully points out. These measures are being taken to offset the cost of the new National Living Wage, which came in this month. You wouldn't think adding another 50p an hour would be that big a deal for companies this large, but apparently paying people almost enough to live on is a real stretch. Poverty-related news doesn't end there, as food bank usage rose again this year, according to the Trussell Trust, an organisation that runs a network of food banks. The trust alone distributed 1,109,309 emergency food packages between 2015-16, so the actual scale of food bank usage could be way higher. The charity urged people not to accept food poverty as the "new normal". Areas with a lot of people unable to work due to long-term sickness or disability had a particularly high usage of food banks. Another cause is the growing number of people on low incomes. 
In the meantime, if you're a struggling family, maybe you could make your way out of poverty with a few savvy investments. You'd certainly have the government's blessing. This week, David Cameron defended his familial offshore wealth, saying "we must always support those who want to own shares and make investments to support their families". His statement to Parliament managed to turn a debate about super-elite tax avoidance into a discussion about families who want to put something away for their kids. Unfortunately, while passing on wealth or a home may be a natural "instinct", passing on a low income seems more like an inevitability. A study of over a quarter of a million people released this week showed that graduates from poor backgrounds tend to earn 10 percent less than graduates from rich backgrounds. In 2012/13, the average gap was £8,000 for men and £5,300 for women, ten years after graduation. Seems like the best way to get rich is to already be rich. Meanwhile, Tesco Mobile have done one of those bits of "research" designed to get their brand in the news. So take this with a pinch of salt, but they reckon 40 percent of parents of teen and adult Londoners are paying their children's bills, with 75 percent claiming their child needs help financially. The government says it's taking us to a "higher wage society". On the evidence so far, that means longer queues outside food banks as bosses snatch away everyone's free paninis and find ever more innovative ways not to pay people. Looks like mummy and daddy are gonna be covering our phone bills for some time to come. @SimonChilds13 More from VICE: How Offshore Tax Avoidance Can Be Stopped in London We Might Be Fucked, But Let's At Least Hold the People Fucking Us to Account The Rich Bastard's Guide to Choosing Which Tax Haven Is Right for You
<filename>ocridcardlibrary/src/main/java/com/kernal/passportreader/sdk/CardsCameraActivity.java package com.kernal.passportreader.sdk; import android.annotation.TargetApi; import android.app.Activity; import android.app.Service; import android.content.Intent; import android.net.Uri; import android.os.Build; import android.os.Bundle; import android.os.Vibrator; import android.util.Log; import android.view.View; import android.view.ViewGroup; import android.view.Window; import android.view.WindowManager; import android.widget.ImageButton; import android.widget.RelativeLayout; import android.widget.Toast; import androidx.annotation.Nullable; import androidx.appcompat.app.AppCompatActivity; import com.kernal.passportreader.sdk.utils.CardScreenUtil; import com.kernal.passportreader.sdk.utils.DefaultPicSavePath; import com.kernal.passportreader.sdk.view.ScanCardsView; import kernal.idcard.android.ResultMessage; import kernal.idcard.camera.CardOcrRecogConfigure; import kernal.idcard.camera.IScanReturnMessage; import kernal.idcard.camera.SharedPreferencesHelper; import kernal.idcard.camera.UritoPathUtil; /** * @author A@H * @describle 相机界面,主要识别结果的获取以及识别完成之后的跳转 */ public class CardsCameraActivity extends AppCompatActivity implements IScanReturnMessage, View.OnClickListener { RelativeLayout relativeLayout; RelativeLayout.LayoutParams imageButton_flash_params, imageButton_camera_params, imageButton_back_params, imageButton_spot_dection_params, imageButton_ejct_params; ImageButton imageButton_flash, imageButton_camera, imageButton_back, imageButton_spot_dection, imageButton_ejct; ScanCardsView scanICamera; private boolean isOpenFlash = false; private int width, height; private boolean isOpendetectLightspot = false; public CardsCameraActivity() { CardOcrRecogConfigure.getInstance() //设置识别返回的语言 .initLanguage(getApplicationContext()) //证件类型的ID .setnMainId(SharedPreferencesHelper.getInt( getApplicationContext(), "nMainId", 2)) //证件类型的子ID .setnSubID(SharedPreferencesHelper.getInt( 
getApplicationContext(), "nSubID", 0)) //身份证的正反面区分 0-自动区分;1-只识别正面;2-只识别反面(注:不设置默认为0) .setFlag(0) //设置扫描的方式 0-指导框扫描;1-实时监测边线 .setnCropType(1) //是否保存全图 .setSaveFullPic(true) //是否保存裁切图 .setSaveCut(true) //是否保存证件的头像 .setSaveHeadPic(true) //是否开启获取泰文的坐标点(注:只在识别泰国身份证,设置该参数) .setOpenGetThaiFeatureFuction(false) //是否开启证件识别的复印件的区分(注:黑白复印件通用,彩色复印件和摩尔纹只适用于身份证) .setOpenIDCopyFuction(true) //是否获取泰文的条码图片(注:只适用于泰国身份证) .setThaiCodeJpgPath(false) //是否开启拒识功能,默认开启拒识功能 .setSetIDCardRejectType(true) //设置图片的存储路径(注:默认路径为:Environment.getExternalStorageDirectory().toString() // + "/wtimage/") .setSavePath(new DefaultPicSavePath(this, true)); } /** * 扫描识别成功,界面的跳转(回调接口) * * @param resultMessage 识别结果 * @param picPath 图片路径数组,picPath[0]: 全图路径;picPath[1]: 裁切图;picPath[2]: 证件头像 */ @Override public void scanOCRIdCardSuccess(ResultMessage resultMessage, String[] picPath) { Vibrator mVibrator = (Vibrator) getApplication().getSystemService( Service.VIBRATOR_SERVICE); mVibrator.vibrate(200); Intent intent = new Intent(); Bundle bundle = new Bundle(); bundle.putSerializable("resultMessage", resultMessage); bundle.putStringArray("picpath", picPath); intent.putExtra("resultbundle", bundle); this.setResult(Activity.RESULT_OK, intent); this.finish(); } /** * 扫描识别失败,界面的跳转(回调接口) * * @param error -10601 开发码错误,把该文件中的{@link com.kernal.passportreader.sdk.utils.Devcode}中的devcode替换 * -10602 applicationId错误,把build.gradle文件中的 applicationId修改为授权文件中绑定的信息 * -10603 授权到期,请从新申请授权 * -10605 string.xml中的app_name字段属性和授权文件中绑定的不一致 * -10606 string.xml中company_name字段属性和授权文件中绑定的不一致 * -10608 string.xml中缺少company_name字段,请添加该字段 * @param picPath 该数组存储的是拍照识别失败时的全图路径 */ @Override public void scanOCRIdCardError(String error, String[] picPath) { Intent intent = new Intent(); intent.putExtra("error", error); intent.putExtra("strpicpath", picPath[0]); this.setResult(Activity.RESULT_OK, intent); this.finish(); } @Override public void authOCRIdCardSuccess(String result) { } /** * 授权失败 * * @param error * @see */ @Override public void 
authOCRIdCardError(String error) { Intent intent = new Intent(); intent.putExtra("error", error); this.setResult(Activity.RESULT_OK, intent); this.finish(); } @Override protected void onActivityResult(int requestCode, int resultCode, Intent data) { super.onActivityResult(requestCode, resultCode, data); if (requestCode == 9 && resultCode == Activity.RESULT_OK) { Uri uri = data.getData(); scanICamera.importPicRecog(UritoPathUtil.getImageAbsolutePath(getApplicationContext(), uri)); } } @Override public void onClick(View v) { //返回事件 if (v == imageButton_back) { this.finish(); //拍照事件 } else if (v == imageButton_camera) { scanICamera.takePicRecog(); //闪光灯 } else if (v == imageButton_flash) { if (isOpenFlash) { isOpenFlash = false; scanICamera.managerFlashLight(isOpenFlash); imageButton_flash.setBackgroundResource(R.mipmap.flash_off); } else { isOpenFlash = true; scanICamera.managerFlashLight(isOpenFlash); imageButton_flash.setBackgroundResource(R.mipmap.flash_on); } //隐藏、显示拍照事件 } else if (v == imageButton_ejct) { imageButton_ejct.setVisibility(View.GONE); imageButton_camera.setVisibility(View.VISIBLE); // 光斑检测事件 } else if (v == imageButton_spot_dection) { if (isOpendetectLightspot) { Toast.makeText(this, getString(R.string.closeddetectLightspot), Toast.LENGTH_SHORT).show(); isOpendetectLightspot = false; imageButton_spot_dection.setBackgroundResource(R.mipmap.spot_dection_off); scanICamera.managerSpotDection(isOpendetectLightspot); } else { Toast.makeText(this, getString(R.string.opendetectLightspot), Toast.LENGTH_SHORT).show(); isOpendetectLightspot = true; imageButton_spot_dection.setBackgroundResource(R.mipmap.spot_dection_on); scanICamera.managerSpotDection(isOpendetectLightspot); } } } @Override @TargetApi(16) protected void onCreate(@Nullable Bundle savedInstanceState) { super.onCreate(savedInstanceState); getWindow().setFlags(WindowManager.LayoutParams.FLAG_FULLSCREEN, WindowManager.LayoutParams.FLAG_FULLSCREEN); 
getWindow().addFlags(WindowManager.LayoutParams.FLAG_KEEP_SCREEN_ON); requestWindowFeature(Window.FEATURE_NO_TITLE); hideBottomUIMenu(); setContentView(R.layout.activity_idcardscan); width = CardScreenUtil.getScreenResolution(this).x; height = CardScreenUtil.getScreenResolution(this).y; // WriteUtil.writeLog("该设备的屏幕分辨率为:"+width+"X"+height); relativeLayout = findViewById(R.id.camera_layout); scanICamera = new ScanCardsView(this); if (CardScreenUtil.getScreenOrientation(this) == CardScreenUtil.ORIENTATION_LANDSCAPE) { //闪光灯 imageButton_flash = new ImageButton(this); imageButton_flash.setBackground(getResources().getDrawable(R.mipmap.flash_off)); imageButton_flash_params = new RelativeLayout.LayoutParams((int) (width * 0.05), (int) (width * 0.05)); imageButton_flash_params.addRule(RelativeLayout.ALIGN_PARENT_TOP, R.id.camera_layout); imageButton_flash_params.addRule(RelativeLayout.ALIGN_PARENT_LEFT, R.id.camera_layout); imageButton_flash_params.leftMargin = (int) (width * 0.02); imageButton_flash_params.topMargin = (int) (width * 0.02); //拍照按钮 imageButton_camera = new ImageButton(this); imageButton_camera.setBackground(getResources().getDrawable(R.mipmap.tack_pic_btn)); imageButton_camera_params = new RelativeLayout.LayoutParams((int) (width * 0.1), (int) (width * 0.1)); imageButton_camera_params.addRule(RelativeLayout.ALIGN_PARENT_RIGHT, R.id.camera_layout); imageButton_camera_params.addRule(RelativeLayout.CENTER_VERTICAL); imageButton_camera_params.rightMargin = (int) (width * 0.02); imageButton_camera.setVisibility(View.GONE); //返回按钮 imageButton_back = new ImageButton(this); imageButton_back.setBackground(getResources().getDrawable(R.mipmap.camera_back_nomal)); imageButton_back_params = new RelativeLayout.LayoutParams((int) (width * 0.05), (int) (width * 0.05)); imageButton_back_params.addRule(RelativeLayout.ALIGN_PARENT_BOTTOM, R.id.camera_layout); imageButton_back_params.addRule(RelativeLayout.ALIGN_PARENT_LEFT, R.id.camera_layout); 
imageButton_back_params.leftMargin = (int) (width * 0.02); imageButton_back_params.bottomMargin = (int) (width * 0.02); //光斑检测按钮 imageButton_spot_dection = new ImageButton(this); imageButton_spot_dection.setBackgroundResource(R.mipmap.spot_dection_off); imageButton_spot_dection_params = new RelativeLayout.LayoutParams((int) (width * 0.08), (int) (width * 0.08)); imageButton_spot_dection_params.addRule(RelativeLayout.ALIGN_PARENT_LEFT, R.id.camera_layout); imageButton_spot_dection_params.addRule(RelativeLayout.CENTER_VERTICAL); imageButton_spot_dection_params.leftMargin = (int) (width * 0.02); //隐藏拍照按钮 imageButton_ejct = new ImageButton(this); imageButton_ejct.setBackground(getResources().getDrawable(R.mipmap.locker_btn_def)); imageButton_ejct_params = new RelativeLayout.LayoutParams((int) (height * 0.05), (int) (height * 0.5)); imageButton_ejct_params.addRule(RelativeLayout.ALIGN_PARENT_RIGHT, R.id.camera_layout); imageButton_ejct_params.addRule(RelativeLayout.CENTER_VERTICAL); } else { //竖屏状态下框的方向 true:width>height; false:height>width //ViewfinderView.isSameScreen = false; //闪光灯 imageButton_flash = new ImageButton(this); imageButton_flash.setBackground(getResources().getDrawable(R.mipmap.flash_off)); imageButton_flash_params = new RelativeLayout.LayoutParams((int) (height * 0.05), (int) (height * 0.05)); imageButton_flash_params.addRule(RelativeLayout.ALIGN_PARENT_TOP, R.id.camera_layout); imageButton_flash_params.addRule(RelativeLayout.ALIGN_PARENT_RIGHT, R.id.camera_layout); imageButton_flash_params.rightMargin = (int) (height * 0.02); imageButton_flash_params.topMargin = (int) (height * 0.02); //拍照按钮 imageButton_camera = new ImageButton(this); imageButton_camera.setBackground(getResources().getDrawable(R.mipmap.tack_pic_btn)); imageButton_camera_params = new RelativeLayout.LayoutParams((int) (height * 0.1), (int) (height * 0.1)); imageButton_camera_params.addRule(RelativeLayout.ALIGN_PARENT_BOTTOM, R.id.camera_layout); 
imageButton_camera_params.addRule(RelativeLayout.CENTER_HORIZONTAL); imageButton_camera_params.bottomMargin = (int) (height * 0.02); imageButton_camera.setVisibility(View.GONE); //返回按钮 imageButton_back = new ImageButton(this); imageButton_back.setBackground(getResources().getDrawable(R.mipmap.camera_back_nomal)); imageButton_back_params = new RelativeLayout.LayoutParams((int) (height * 0.05), (int) (height * 0.05)); imageButton_back_params.addRule(RelativeLayout.ALIGN_PARENT_TOP, R.id.camera_layout); imageButton_back_params.addRule(RelativeLayout.ALIGN_PARENT_LEFT, R.id.camera_layout); imageButton_back_params.leftMargin = (int) (height * 0.02); imageButton_back_params.topMargin = (int) (height * 0.02); //光斑检测按钮 imageButton_spot_dection = new ImageButton(this); imageButton_spot_dection.setBackgroundResource(R.mipmap.spot_dection_off); imageButton_spot_dection_params = new RelativeLayout.LayoutParams((int) (height * 0.08), (int) (height * 0.08)); imageButton_spot_dection_params.addRule(RelativeLayout.ALIGN_PARENT_TOP, R.id.camera_layout); imageButton_spot_dection_params.addRule(RelativeLayout.CENTER_HORIZONTAL); imageButton_spot_dection_params.topMargin = (int) (height * 0.02); //隐藏拍照按钮 imageButton_ejct = new ImageButton(this); imageButton_ejct.setBackground(getResources().getDrawable(R.mipmap.locker_btn_def_p)); imageButton_ejct_params = new RelativeLayout.LayoutParams((int) (width * 0.5), (int) (width * 0.05)); imageButton_ejct_params.addRule(RelativeLayout.ALIGN_PARENT_BOTTOM, R.id.camera_layout); imageButton_ejct_params.addRule(RelativeLayout.CENTER_HORIZONTAL); } relativeLayout.addView(scanICamera, new RelativeLayout.LayoutParams(ViewGroup.LayoutParams.MATCH_PARENT, ViewGroup.LayoutParams.MATCH_PARENT)); relativeLayout.addView(imageButton_flash, imageButton_flash_params); relativeLayout.addView(imageButton_camera, imageButton_camera_params); relativeLayout.addView(imageButton_back, imageButton_back_params); relativeLayout.addView(imageButton_spot_dection, 
imageButton_spot_dection_params); relativeLayout.addView(imageButton_ejct, imageButton_ejct_params); imageButton_camera.setOnClickListener(this); imageButton_back.setOnClickListener(this); imageButton_flash.setOnClickListener(this); imageButton_ejct.setOnClickListener(this); imageButton_spot_dection.setOnClickListener(this); } @Override protected void onResume() { super.onResume(); scanICamera.setIScan(this); scanICamera.startCamera(); } @Override protected void onPause() { super.onPause(); scanICamera.stopCamera(); } @Override protected void onDestroy() { super.onDestroy(); scanICamera.destroyService(); } @Override public void openCameraError(String error) { Log.i("string", "失败的信息" + error); } /** * 隐藏虚拟按键,并且全屏 */ protected void hideBottomUIMenu() { //隐藏虚拟按键,并且全屏 if (Build.VERSION.SDK_INT > 11 && Build.VERSION.SDK_INT < 19) { // lower api View v = this.getWindow().getDecorView(); v.setSystemUiVisibility(View.GONE); } else if (Build.VERSION.SDK_INT >= 19) { //for new api versions. View decorView = getWindow().getDecorView(); int uiOptions = View.SYSTEM_UI_FLAG_LAYOUT_STABLE | View.SYSTEM_UI_FLAG_LAYOUT_HIDE_NAVIGATION | View.SYSTEM_UI_FLAG_LAYOUT_FULLSCREEN | View.SYSTEM_UI_FLAG_HIDE_NAVIGATION // hide nav bar // | View.SYSTEM_UI_FLAG_FULLSCREEN // hide status bar | View.SYSTEM_UI_FLAG_IMMERSIVE; decorView.setSystemUiVisibility(uiOptions); getWindow().addFlags(WindowManager.LayoutParams.FLAG_TRANSLUCENT_NAVIGATION); } } }
November 3rd, 2016 David Graham: Your FOSS Rep in the Canadian Parliament Linux and open source has a friend in the “Great White North,” and we don’t mean one of the McKenzie brothers. As an MP, this friend works to bring awareness of open source to Ottawa as he serves the interests of the people of his district in Quebec. The FOSS Force Video Viewing Room You didn’t know we had one? We do! Sort of. David Graham is the Member of Parliament for Laurentides—Labelle, which is in Quebec. He’s also a cofounder of the OFTC (Open and Free Technology) IRC network and for many years used the online handle “CDLU,” for “Confused Debian Linux User.” Confused or not, he got his start in politics running for (and becoming) Secretary of Software in the Public Interest, a non-profit group that helps develop and spread free and open source software, most notably Debian Linux. David was also the newsfeed editor for Linux.com for eight years (Disclosure: I was his boss). He’s also a licensed pilot, a rail fan and the father of a delightful little girl. Hey! I’d vote for him. Wouldn’t you? Assuming we lived in his district, that is. One Member of Parliament can’t suddenly switch the Canadian government to open source software. David can, however, serve in the Digital Caucus (which he co-founded) and help raise awareness of FOSS in Canadian government circles. Rest assured, he does just that, but it’s only one of many priorities he has while representing his constituents. So does his presence in elected office do anything for Linux? Again, he raises awareness, but doesn’t directly negotiate IT contracts, so his influence is limited. But when he’s Prime Minister — which is no more unlikely today than some of us once considered the idea of David actually winning a seat in the House of Commons — things might suddenly change up there in the Northland, eh? Related
There were a number of incidents during the Second World War which were confusing or mysterious, many of them immediately presenting a puzzle that took a long time to solve, if there ever was a solution. Shadow Divers is the story of the partial resolution of one of those mysteries. The book covers a curious incident that uncovered a mystery from the World War II era, one which hasn’t been completely solved and probably never will be. In 1991 a group of deep sea scuba divers, led by John Chatterton, a legendary diver, found a U-boat 60 miles off the coast of New Jersey, in about 210 feet of water. This was about the limit of how deep scuba divers could go in that era, and so the dives were hazardous. Eventually, three men died diving on the wreck. When the wreck was discovered, Chatterton began to attempt to identify the sub, but was unable to do so. When another diver, Richie Kohler, joined the effort, the two men spent a great deal of their time onshore attempting to discover which U-boat this wreck could be, while at the same time trying, when diving on the wreck, to recover an artifact which would settle the controversy. Chatterton and Kohler consulted experts on both sides of the Atlantic, Kohler even traveling to Germany to visit and talk to people, attempting to garner more information about lost U-boats. The solution to the mystery, when it emerged with the discovery of an engine plate that had the boat’s number on it, only partially solved the question. The difficulty is that while the boat’s been identified, how it was sunk is still a mystery because no one survived the wreck. This is an extremely well-written book, full of the lore of deep-sea scuba diving, and full also of information on World War II U-boats. I would recommend this book to almost anyone interested in the sea, diving, submarines, the Second World War, and frankly to anyone who reads general non-fiction.
Review symposium: Generational succession in the Big Apple Abstract Inheriting the City presents the results of a major research project on the children of immigrants in New York City, focusing on eight groups, five of which are immigrant groups: Dominicans; South Americans from Colombia, Ecuador and Peru; English-speaking West Indians; the Chinese; and Russian Jews. The three comparison groups are native whites, native blacks, and Puerto Ricans. The symposium allowed three critics who have followed this project from its earliest phases to assess the results and the authors to respond to the issues raised by their commentaries.
package de.simpleworks.staf.module.jira.module; import java.net.URI; import java.net.URISyntaxException; import java.net.URL; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import com.atlassian.jira.rest.client.api.IssueRestClient; import com.atlassian.jira.rest.client.api.JiraRestClient; import com.atlassian.jira.rest.client.api.JiraRestClientFactory; import com.atlassian.jira.rest.client.internal.async.AsynchronousJiraRestClientFactory; import com.google.inject.AbstractModule; import com.google.inject.Provides; import de.simpleworks.staf.commons.exceptions.SystemException; import de.simpleworks.staf.module.jira.util.JiraProperties; public class JiraModule extends AbstractModule { private static final Logger logger = LogManager.getLogger(JiraModule.class); private final JiraProperties properties; private JiraRestClient client; public JiraModule() { properties = JiraProperties.getInstance(); } @Override protected void configure() { URI uri = null; try { uri = getJiraUri(); final JiraRestClientFactory factory = new AsynchronousJiraRestClientFactory(); client = factory.createWithBasicHttpAuthentication(uri, properties.getUsername(), properties.getPassword()); } catch (final Throwable th) { final String msg = String.format("can't create jira rest client (url: '%s', user: '%s')", uri, properties.getUsername()); JiraModule.logger.error(msg, th); throw new RuntimeException(msg); } } @Provides public IssueRestClient getIssueRestClient() { return client.getIssueClient(); } private URI getJiraUri() throws SystemException { if (properties == null) { throw new IllegalArgumentException("configuration can't be null."); } final URL url = properties.getUrl(); if (url == null) { throw new IllegalArgumentException("url can't be null."); } URI uri; try { uri = url.toURI(); } catch (final URISyntaxException ex) { throw new SystemException( String.format("can't parse URL %s to URI, due to %s.", url.toString(), ex.getMessage())); } return uri; } }