/**
 *
 */
package com.mangodemo.repository;

import org.testng.Assert;
import org.testng.annotations.Test;

import com.mangodemo.BaseTest;
import com.mangodemo.model.Person;

/**
 * @author jiazhang
 *
 */
public class PersonRepositoryTest extends BaseTest {

    @Test
    public void insert() {
        PersonRepository personRepository = mango.create(PersonRepository.class);
        Person person = new Person(1, "张三", 14, "深圳");
        // addPerson should insert exactly one row
        int affectedRows = personRepository.addPerson(person);
        Assert.assertEquals(affectedRows, 1);
    }
}
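The PersonRepository interface itself is not shown in this snippet. Below is a purely hypothetical sketch of what it might look like with Mango's DAO annotations; the table name, column names and the parameter-binding syntax are assumptions, not taken from the original project.

// Hypothetical sketch only; column names and the ":1.x" binding syntax are assumed.
@DB
public interface PersonRepository {

    @SQL("insert into person(id, name, age, city) values(:1.id, :1.name, :1.age, :1.city)")
    int addPerson(Person person);
}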
|
// Copyright 2019 The LevelDB-Go and Pebble Authors. All rights reserved. Use
// of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.
package randvar
import (
"encoding/binary"
"fmt"
"regexp"
"strconv"
"strings"
"golang.org/x/exp/rand"
)
var randVarRE = regexp.MustCompile(`^(?:(latest|uniform|zipf):)?(\d+)(?:-(\d+))?$`)
// Flag provides a command line flag interface for specifying static random
// variables.
type Flag struct {
Static
spec string
}
// NewFlag creates a new Flag initialized with the specified spec.
func NewFlag(spec string) *Flag {
f := &Flag{}
if err := f.Set(spec); err != nil {
panic(err)
}
return f
}
func (f *Flag) String() string {
return f.spec
}
// Type implements the Flag.Value interface.
func (f *Flag) Type() string {
return "randvar"
}
// Set implements the Flag.Value interface.
func (f *Flag) Set(spec string) error {
m := randVarRE.FindStringSubmatch(spec)
if m == nil {
return fmt.Errorf("invalid random var spec: %s", spec)
}
min, err := strconv.Atoi(m[2])
if err != nil {
return err
}
max := min
if m[3] != "" {
max, err = strconv.Atoi(m[3])
if err != nil {
return err
}
}
switch strings.ToLower(m[1]) {
case "", "uniform":
f.Static = NewUniform(nil, uint64(min), uint64(max))
case "latest":
f.Static, err = NewSkewedLatest(nil, uint64(min), uint64(max), 0.99)
if err != nil {
return err
}
case "zipf":
var err error
f.Static, err = NewZipf(nil, uint64(min), uint64(max), 0.99)
if err != nil {
return err
}
default:
return fmt.Errorf("unknown random var distribution: %s", m[1])
}
f.spec = spec
return nil
}
// BytesFlag provides a command line flag interface for specifying random
// bytes. The specification provides for both the length of the random bytes
// and a target compression ratio.
type BytesFlag struct {
sizeFlag Flag
targetCompression float64
spec string
}
// NewBytesFlag creates a new BytesFlag initialized with the specified spec.
func NewBytesFlag(spec string) *BytesFlag {
f := &BytesFlag{}
if err := f.Set(spec); err != nil {
panic(err)
}
return f
}
func (f *BytesFlag) String() string {
return f.spec
}
// Type implements the Flag.Value interface.
func (f *BytesFlag) Type() string {
return "randbytes"
}
// Set implements the Flag.Value interface.
func (f *BytesFlag) Set(spec string) error {
parts := strings.Split(spec, "/")
if len(parts) == 0 || len(parts) > 2 {
return fmt.Errorf("invalid randbytes spec: %s", spec)
}
if err := f.sizeFlag.Set(parts[0]); err != nil {
return err
}
f.targetCompression = 1.0
if len(parts) == 2 {
var err error
f.targetCompression, err = strconv.ParseFloat(parts[1], 64)
if err != nil {
return err
}
}
f.spec = spec
return nil
}
// Bytes returns random bytes. The length of the random bytes comes from the
// internal sizeFlag.
func (f *BytesFlag) Bytes(r *rand.Rand) []byte {
size := int(f.sizeFlag.Uint64())
uniqueSize := int(float64(size) / f.targetCompression)
if uniqueSize < 1 {
uniqueSize = 1
}
data := make([]byte, size)
offset := 0
for offset+8 <= uniqueSize {
binary.LittleEndian.PutUint64(data[offset:], r.Uint64())
offset += 8
}
word := r.Uint64()
for offset < uniqueSize {
data[offset] = byte(word)
word >>= 8
offset++
}
for offset < size {
data[offset] = data[offset-uniqueSize]
offset++
}
return data
}
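A minimal usage sketch of the two flag types defined above, written as if it lived inside the same package (so no import path is assumed); Static is assumed to expose a parameterless Uint64, as the Bytes implementation above suggests.

// Sketch only: parse spec strings and draw a size and a random byte slice.
func ExampleFlags() {
	sizes := NewFlag("uniform:1-100")            // "latest:" and "zipf:" prefixes are also accepted
	payload := NewBytesFlag("uniform:32-64/2.0") // 32-64 random bytes, roughly 2x compressible

	rng := rand.New(rand.NewSource(0)) // golang.org/x/exp/rand, already imported above
	fmt.Println(sizes.Type(), sizes.String(), sizes.Uint64())
	fmt.Println(len(payload.Bytes(rng)))
}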
|
/**
* Builder class for {@link Request}.
* Provides a convenient way to set various fields to {@link Request}
*/
public static class Builder {
private String url;
private String method;
private String userAgent;
private Params params;
private int connectTimeout;
private int readTimeout;
private boolean usesCache;
public Builder(String url) {
url(url);
method(GET);
userAgent(MOBILE_USER_AGENT);
timeout(READ_TIMEOUT, CONNECT_TIMEOUT);
}
public Builder url(String url) {
this.url = url;
return this;
}
public Builder method(String method) {
this.method = method;
return this;
}
public Builder userAgent(String userAgent) {
this.userAgent = userAgent;
return this;
}
public Builder params(Params params) {
this.params = params;
return this;
}
public Builder putParam(String name, String value) {
createParams().put(name, value);
return this;
}
public Builder putParam(String name, int value) {
createParams().put(name, value);
return this;
}
public Builder putParam(String name, long value) {
createParams().put(name, value);
return this;
}
public Builder putParam(String name, boolean value) {
createParams().put(name, value);
return this;
}
public Builder timeout(int read, int connect) {
this.readTimeout = read;
this.connectTimeout = connect;
return this;
}
public Builder usesCache(boolean usesCache) {
this.usesCache = usesCache;
return this;
}
public Request build() {
return new Request(this);
}
private Params createParams() {
if (params == null) {
params = new Params();
}
return params;
}
}
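A brief usage sketch of the builder; the URL and parameter values below are illustrative only, and GET, MOBILE_USER_AGENT and the timeout constants are assumed to be defined in the enclosing Request class, as the constructor suggests.

// Illustrative usage only; the endpoint and parameters are made up.
Request request = new Request.Builder("https://example.com/api/search")
        .method("POST")
        .putParam("query", "flowers")
        .putParam("limit", 20)
        .usesCache(true)
        .build();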
|
After a week of being behind Hillary Clinton by ten points or more in various polls, Donald Trump has suddenly pulled to nearly even in a national poll by Reuters/Ipsos released Saturday.
Clinton leads with 42 percent of the vote compared to 39 percent for Trump, a significant gain for the GOP nominee, who had been behind by eight points in a Reuters poll Monday.
"The reasons behind the shift were unclear," Reuters said.
The survey was conducted July 31 to August 4 as controversy raged over Trump's response to a Muslim couple, Khizr and Ghazala Khan, who spoke out against the Republican during the Democratic National Convention. Their son had died fighting in Iraq. Trump questioned why Ghazala Khan didn't speak, insinuating she was prevented from doing so because she is Muslim.
The online poll of 1,154 likely voters had a margin of error of plus or minus 3 points, Reuters said.
|
// vknaisl/munchen_tdd: sources/src/main/java/cz/knaisl/munchen_tdd/core/converter/DateToZonedDateTimeConverter.java
package cz.knaisl.munchen_tdd.core.converter;

import org.springframework.core.convert.converter.Converter;

import java.time.ZoneId;
import java.time.ZonedDateTime;
import java.util.Date;

public class DateToZonedDateTimeConverter implements Converter<Date, ZonedDateTime> {

    @Override
    public ZonedDateTime convert(Date source) {
        // Convert a legacy java.util.Date to a ZonedDateTime in the system default zone.
        return source == null ? null : ZonedDateTime.ofInstant(source.toInstant(), ZoneId.systemDefault());
    }
}
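A minimal usage sketch, not part of the original repository, showing the conversion in isolation:

// Illustrative only.
DateToZonedDateTimeConverter converter = new DateToZonedDateTimeConverter();
ZonedDateTime zoned = converter.convert(new Date());
System.out.println(zoned); // prints the current instant in the system default zone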
|
All-carbon-nanotube flexible multi-electrode array for neuronal recording and stimulation
Neuro-prosthetic devices aim to restore impaired function through artificial stimulation of the nervous system. A lingering technological bottleneck in this field is the realization of soft, micron-sized electrodes capable of injecting enough charge to evoke localized neuronal activity without causing either electrode or tissue damage. Direct stimulation with micro electrodes will offer the high efficacy needed in applications such as cochlear and retinal implants. Here we present a new flexible neuronal micro electrode device, based entirely on carbon nanotube technology, in which both the conducting traces and the stimulating electrodes consist of conducting carbon nanotube films embedded in a polymeric support. The use of carbon nanotubes endows the electrodes with flexibility and excellent electrochemical properties. Unlike contemporary flexible neuronal electrodes, the technology presented here is robust, and the resulting stimulating electrodes are nearly purely capacitive. Recording and stimulation tests with chick retinas were used to validate the advantageous properties of the electrodes and to demonstrate their suitability for high-efficacy neuronal stimulation applications.
Introduction
Flexible neuronal micro electrode technology has progressed extensively over the past several decades, hand in hand with the overall development of the field of neuro-prosthetics. Several novel fabrication approaches suited for micro electrode applications have been devised. These schemes attempt to achieve flexible electronic technology integration with high surface roughness while maintaining bio-compatibility and durability in physiological conditions. Commonly, these devices use metal electrodes such as gold (Sandison et al. 2002; Chen et al. 2009, 2011a; Wester et al. 2009; Lacour et al. 2010; Wei et al. 2011), titanium (Takeuchi et al. 2004), electroplated platinum black (Adams et al. 2005; Rodger et al. 2008; Graudejus et al. 2009, 2012; Rui et al. 2011), tungsten (Wei et al. 2011), platinum (Cheung et al. 2007; Mercanzini et al. 2008; Myllymaa et al. 2009; Viventi et al. 2011) and iridium (Rodger et al. 2008; Fomani and Mansour 2011) deposited on various flexible supports such as polyimide (Sandison et al. 2002; Takeuchi et al. 2004; Cheung et al. 2007; Viventi et al. 2011), parylene C (Rodger et al. 2008; Wester et al. 2009) or poly(dimethylsiloxane) (PDMS) (Graudejus et al. 2009, 2012; Lacour et al. 2010; Wei et al. 2011). These metal electrodes achieve neural stimulation by Faradaic current injection through the electrode-electrolyte interface. Electron transfer, associated with Faradaic charge stimulation, can induce irreversible reduction and oxidation reactions that can damage both the electrode and the tissue (Merrill et al. 2005; Cogan 2008). Storage and injection of charge can also occur through valence changes in multivalent electrode coatings such as iridium oxide (Robblee et al. 1983; Klein et al. 1989) that undergo reversible reduction-oxidation reactions (Merrill et al. 2005; Cogan 2008). Consequently, capacitive charge stimulation is preferable for neuronal stimulation, as it involves only a displacement current associated with charging and discharging of the electrode-electrolyte double layer (Merrill et al. 2005). Common capacitive electrode materials include titanium nitride (TiN), tantalum/tantalum oxide and the more recently investigated carbon nanotubes (CNTs) (Rose et al. 1985; Gabay et al. 2007; Cogan 2008). Conducting polymers, such as polypyrrole (PPy) and poly(ethylenedioxythiophene) (PEDOT), are mixed conductors, exhibiting both electron and ion transport within the polymer film (Ludwig et al. 2006; Abidian et al. 2010; Blau et al. 2011).
(Moshe David-Pur and Lilach Bareket-Keren contributed equally to this work. The online version of this article (doi:10.1007/s10544-013-9804-6) contains electronic supplementary material, which is available to authorized users.)
A related key requirement in neuronal electrode technology is large specific capacitance (Cs). Large specific capacitance reduces the electrode impedance without increasing its geometric area. The reduction in impedance is essential for efficient, high resolution neuronal recording and stimulation (Robinson 1968; Loeb et al. 1995; Merrill et al. 2005; Cogan 2008). One of the best materials to exhibit both large specific capacitance and non-Faradaic behavior is porous TiN, with Cs in the range of 2 mF cm−2 (Gabay et al. 2007). It was recently demonstrated that pristine CNTs exhibit performance similar to that of TiN, with Cs values in the range of 3-10 mF cm−2 (Gabay et al. 2007). Accordingly, CNTs have been suggested by several studies as a future material for neuronal stimulation applications and several fabrication schemes have been studied. Primarily, direct growth of CNT electrodes (Wang et al. 2006; Gabay et al. 2007; Su et al. 2010) as well as CNT coatings of metal electrodes by electro-polymerization (Keefer et al. 2008), drop coating from a solution (Gabriel et al. 2009) and micro-contact printing (Fuchsberger et al. 2011) on a rigid support were described. To accommodate flexibility, CNT transfer onto a polymeric support (Su, Lin et al. 2009; Tsai et al. 2009; Carnahan et al. 2010; Chang-Jian et al. 2010) was recently presented. However, the lack of a simple platform allowing the realization of fully functional devices consisting of pristine CNT surfaces has left this technology so far largely unused.
Here, we present a novel flexible neuronal micro electrode device, based solely on multi-walled CNT (MWCNT) films embedded in a flexible polymeric support. We demonstrate a new simple and robust fabrication technique to realize the seamless CNT circuit on the flexible substrate. Next, the electrical and electrochemical properties of the CNT electrodes and of the CNT conducting traces were studied, using a scheme of specially designed electrode arrays. Finally, the flexible CNT MEA was applied for extracellular neuronal recording and stimulation of chick retinas.
Flexible CNT MEA fabrication
Flexible CNT MEAs were fabricated as follows. First, standard lithography (AZ1518 photoresist; Clariant) was used to form the desired circuit pattern on a silicon/silicon dioxide (Si/SiO2) support. A 2.5 nm Ni catalyst layer was deposited using an e-beam evaporator (VST). A resist lift-off process was then performed, followed by an oxygen plasma treatment to remove all photoresist residues. Next, MWCNTs were grown by chemical vapor deposition (CVD) (Lindberg Blue) with ethylene (20 sccm) and hydrogen (1,000 sccm) at 900°C. A flexible substrate, medical adhesive tape, parylene C, polyimide or poly(dimethylsiloxane) (PDMS), was applied and peeled off with the CNT pattern. Medical adhesive tape (Steri-Drape, 3M) was attached to the CNT pattern and pressed lightly. Parylene C was applied to the CNT pattern by vapor deposition. Polyimide, prepared from a poly(pyromellitic dianhydride-co-4,4′-oxydianiline) 15 wt.% solution in N-methyl-2-pyrrolidone (Sigma-Aldrich), was spin coated and cured at 350°C under a nitrogen atmosphere. Uncured PDMS (Sylgard 184, Dow Corning), mixed in a 10:1 ratio by weight, was cast or spin coated and cured at 60°C. Peeling off very thin PDMS films (~100 μm) required the deposition of a thin Cr layer (2 nm) followed by an Au layer (6 nm) using an e-beam evaporator prior to PDMS application, to reduce the adhesion between the SiO2 and the PDMS. To guarantee the final cleanliness of the CNT film, half-cured PDMS films (60°C for 5 min) were used in the state of a viscous polymer and were applied as an adhesive film onto the CNTs. The use of partially cured films substantially reduced wetting of the CNTs. Finally, a passivation PDMS membrane with predefined holes and the CNT flexible circuit were bonded. The PDMS passivation layer was prepared using a SU8-3050 (MicroChem) patterned mold (see Supplementary Fig. 1). The PDMS passivation was bonded using a custom made holder mounted on a microscope stage. PDMS-PDMS bonding was promoted by oxygen plasma treatment of both films. Oxidation of the PDMS surface exposes silanol groups (Si-OH), so when the two films are brought together they form covalent siloxane bonds (Si-O-Si) which provide excellent sealing (Duffy et al. 1998). Bonding with polyimide and parylene C substrates was achieved by means of an intermediate thin layer of liquid PDMS followed by curing at 60°C. Finally, the medical tape was bonded with the passivation by exploiting the adhesiveness of the tape. These processes yielded 30-65 % clean and capacitive electrodes using an entirely manual preparation. We expect that mechanizing the process can dramatically improve the yield. For electrophysiological experiments the flexible CNT MEA was mounted on a PCB (49×49 mm2) with 60 Au traces and contact pads. A glass chamber was mounted on top of the PCB using uncured PDMS.
Electrical resistance measurements of CNT films
Sets of CNT bars with different lengths and constant width and height were fabricated between TiN pads as follows. A 100 nm TiN layer was sputtered (MRC RF sputter) on a Si/SiO2 support, followed by lithography and reactive ion etching (Nextral 860) to pattern the TiN pads. Due to a marked difference between the diffusion rates of Ni through SiO2 and TiN at the CNT growth temperature, two layers of Ni were deposited by an e-beam evaporator (VST). The first layer (8 nm) was deposited on the inner half of the TiN pads and the second layer (2.5 nm) between the TiN pads on the SiO2 substrate. Finally, a CNT film was grown by CVD (for a detailed illustration see Supplementary Fig. 2). Current versus voltage scans of the different-length TiN-CNTs-TiN bars were recorded and their electrical resistance was calculated (for details see Supplementary Fig. 3).
Electrochemical analysis
The electrochemical properties of the CNT electrodes were characterized by performing cyclic voltammetry (CV) and electrochemical impedance spectroscopy (EIS) in PBS. An Ag/AgCl electrode served as the reference electrode and a platinum wire as the counter electrode. CV measurements were conducted using a potentiostat (263A, Princeton Applied Research) under ambient conditions and recorded using the PowerCV software (Princeton Applied Research). The DC capacitance was derived from the oxidation current versus scan rate data according to the relation i = C·dV/dt, in which i is the charging current, C is the DC capacitance and dV/dt is the scan rate. EIS measurements were conducted under equilibrium conditions by applying small (10 mV) AC signals over the frequency range of 1 Hz to 10 kHz using a lock-in amplifier (SR830, Stanford Research Systems) and a potentiostat (263A, Princeton Applied Research).
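As an illustrative order-of-magnitude check (the 100 mV/s scan rate here is assumed for the example, not quoted from the measurements): a 100 μm diameter electrode has a geometric area of π·(50 μm)² ≈ 7.9·10−5 cm², so a specific capacitance of 2 mF cm−2 corresponds to C ≈ 160 nF, and at dV/dt = 100 mV/s the expected double-layer charging current is i = C·dV/dt ≈ 16 nA.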
Retina preparation and handling
Embryonic chick retinas (day 14) were isolated and transferred to the experimental chamber, and placed RGC layer down onto the flexible MEAs. Better coupling between the tissue and the electrodes was achieved by placing a small piece of polyester membrane filter (5 μm pores; Sterlitech, Kent, WA, USA) on the retina, followed by a ring weight which served as a slice anchor holder. Retinas were kept at physiological conditions according to a previously reported protocol (Hammerle et al. 1994), at a temperature of 34°C and perfused (2-5 ml/min) with oxygenated artificial cerebrospinal fluid.
Electrical recording
Neuronal electrical signals were amplified (gain ×1,200, MultiChannel Systems MEA1060-Inv, Reutlingen, Germany), digitized using a 128-channel analogue-to-digital converter (MultiChannel Systems MC_Card, Reutlingen, Germany) and recorded (MultiChannel Systems MC_Rack, Reutlingen, Germany). All additional signal analysis was performed using Matlab software (MathWorks). Electrically stimulated neuronal activity was digitized at 20 kHz and spikes were detected by setting a signal-to-noise ratio (SNR) threshold of SNR > 4 (relative to the pre-stimulation noise level). Due to the amplifier saturation artifact, the 20 ms period post stimulation was ignored. The response of the retinal site to electrical stimulation was defined as the detected spike count.
Electrical stimulation
Chick retinas were electrically stimulated using a dedicated stimulator (STG-1008, Multi-Channel Systems, Reutlingen, Germany) through one of the MEA electrodes each time (versus an external reference) with charge-balanced biphasic (cathodic first) current stimulation (pulse width: 1 ms and pulse amplitude: 1-10 μA). Each stimulation session included stimulations at the entire intensity range (increased by 1 μA every 10 s) and was repeated five times. To validate that the electrical stimulation resulted from synaptic processes, synaptic blockers CNQX (Sigma) and APV (Sigma) were applied (75 μM and 400 μM respectively).
All-CNT flexible MEA fabrication
We investigated a new fabrication technique utilizing a combination of micro and nano schemes to realize non-Faradaic, CNT based electrodes with very high specific capacitance using a simple fabrication process. To support a simple and robust fabrication process, the electrodes are made exclusively of CNTs, so no complex fabrication integration is required. The general fabrication process, described in Fig. 1a, is based on loosely-bound MWCNT films grown using a CVD process from a thin Ni layer (Fig. 1a-2). The Ni layer is deposited on a supporting Si/SiO2 substrate (Fig. 1a-1). An uncured polymer (e.g. PDMS or polyimide) is then cast on the substrate with the CNT film. After curing, the CNTs are integrated with the polymer. The polymer and the CNT films can then be peeled off from the surface (Fig. 1a-3). Similar results can be obtained by applying an adhesive tape against the CNT pattern or by using vapor deposition of Parylene C. The CNT carrying film and a second layer of holey PDMS membrane are then bonded together (Fig. 1a-4) to form a flexible circuit containing passivated CNT conducting tracks and exposed CNT electrodes. The biocompatibility of PDMS, parylene C and polyimide is well established. Polyimide and parylene C have comparable elastic moduli of ~2-4 GPa (two to three orders of magnitude lower than that of metal and silicon), while the PDMS elasticity (depending on preparation conditions) can be further reduced, down to ~0.05 MPa (Rousche et al. 2001; Brown et al. 2005; Rodger et al. 2008; Meacham et al. 2011). Polyimide can be patterned using standard microfabrication such as photolithography and reactive ion etching (Cheung et al. 2007; Mercanzini et al. 2008), and parylene C has superior resistance to moisture. Finally, the adhesive medical tape enables quick and simple fabrication with well exposed CNT films. Such films may be well suited for skin-applied electrode arrays.
The process is general enough to include additional layers for multi-layer stacking, as well as to incorporate additional elements such as photodiodes. Photodiode integration with the CNT electrode array would enable neuronal stimulation using light, a desirable feature in retinal implants aimed at substituting for degenerated photoreceptors.
This scheme has several notable advantages over previously proposed concepts. Foremost, it is simple to implement, requiring only two independent lithographic steps. Unlike dispersion methods, the use of standard lithography allows high resolution patterning of the CNT film and simple integration of the CNT pattern with the polymer substrate. Moreover, the entire device is based on only a few elementary fabrication steps. Additionally, the device benefits from strong overall stability against peeling and degradation due to the seamless integration between the electrodes and conducting traces. Finally, and most importantly, at no stage of the process are the surfaces of the CNT electrodes exposed to any solvents, photo-resists, or electro-plating baths, rendering the entire process very clean and therefore ensuring the non-Faradaic nature of the electrodes.
While the process described above appears to be straightforward, two critical properties must be carefully maintained to guarantee proper function of the end device. Foremost is the high effective surface area of the electrodes. Clean CNTs have outstanding electrochemical properties; however, impurities and polymeric residues can dramatically hamper the proper operation of the electrodes. Indeed, we have noticed that the cleanliness of the electrode surface can be compromised if the polymer (e.g. PDMS) penetrates the CNT film. The second critical requirement is the electrical conductivity of the CNT interconnects.
Characterization of flexible CNT devices
We begin by discussing the cleanliness of the CNT films and their electrochemical properties. We found that different polymers and deposition methods (e.g. spin coating, applying adhesive tape, and vapor deposition) dramatically affect the extent of the polymer penetration into the film. Accordingly, careful validation of the morphological and electrochemical properties of the electrodes is important. Validation was achieved by using electrode arrays with different electrode diameters (100, 150, 200, 250, 300, 350, 400 and 450 μm). Electrode arrays were realized following the scheme depicted in Fig. 1a and were then systematically tested. The CNT film cleanliness was first validated qualitatively using environmental scanning electron microscopy (ESEM). Figure 2a shows ESEM images of a typical CNT surface on a medical adhesive tape (Fig. 2a-1), on Parylene C (Fig. 2a-2) and on PDMS (Fig. 2a-3). While part of the CNT film is embedded in the cured PDMS, the top surface of the CNTs is clearly exposed (Fig. 2a-3). Apparently clean, highly intertwined MWCNTs were observed on the medical tape and on the Parylene C surfaces (Fig. 2a-1 and a-2). A cross-section image of the CNT film on a medical adhesive tape (Fig. 2a-1, inset) demonstrates a CNT film on top of the flexible medical tape substrate. Under proper preparation conditions clean CNT films were reliably transferred to all of the different flexible substrates described above (see Section 2).
[Fig. 1 caption, recovered fragment: (1) The process is based on a single photolithographically defined Ni catalyst layer. (2) The CNT film is then grown using a CVD process. (3) Next, the film is transferred to a polymeric support (e.g. medical adhesive tape, PDMS, Parylene C, polyimide). (4) Finally, a second polymeric layer (PDMS) with predefined holes is bonded with the CNT carrying film for passivation. b Different patterns of flexible CNT electrode arrays on different support layers: (1) PDMS, (2) medical adhesive tape, (3) Parylene C and (4) polyimide.]
The ESEM imaging was followed by electrochemical characterization using CV, which records the current resulting from scanning the applied voltage, and EIS, which measures frequency-dependent changes in the impedance. CV and EIS measurements were performed with a three-electrode cell configuration using phosphate buffered saline (PBS) and an Ag/AgCl reference electrode. The CV data (Fig. 2b) are markedly flat, showing no signs of reactivity, as expected from clean CNT electrodes (Gabay et al. 2007). Current versus scan-rate plots show a clear linear dependence (Fig. 2c), in accordance with a double layer capacitor model. Finally, the capacitance of different size electrodes was calculated and plotted and the specific capacitance value was derived, yielding values as high as 2 mF cm−2 (Fig. 2d). The variation of the impedance with frequency (1 Hz to 10 kHz) is presented in Fig. 2e. The impedance of a 100 μm diameter CNT electrode (including its long conducting trace) at 1 kHz, a biologically relevant frequency for neural recording, is 55 kΩ. The electrochemical measurements were also used as a tool to directly quantify the extent of the clean surface. Sensitive surface analytical methods such as X-ray photoelectron spectroscopy (XPS) could also be used as a complementary tool to the electrochemical measurements.
Since the CNT films also constitute the circuit lines of our devices, their electrical resistance is consequential (Agrawal et al. 2007). The electrical performances of MWCNTs depend on many factors such as average length, diameter, wall number, structural defects, film thickness, and the amount of amorphous carbon (Ferrari and Robertson 2000). While some of these parameters can be controlled in the growth process to optimize the conductivity of the films, CNT films generally suffer from poor conductivity compared with typical metals. We note that for our device needs, owing to the large electrode-solution impedance, exceptionally high trace conductivity is not critically important and values in the order of several kilo ohms are acceptable.
To validate the CNT film quality and to quantify the electrical conductivity values, the CNT films were characterized using Raman spectroscopy (RS) and direct electrical measurements, respectively. RS was performed to characterize the nature and the quality of the MWCNT films (Fig. 2f). The Raman spectra of the CNT films show two distinct peaks, at 1,360 (D-band) and 1,580 cm−1 (G-band) (Thomsen and Reich 2007). We used the ratio between the D and the G band (ID/IG) as a crude characterization of the defect density, and each CNT film was measured at 20 different sites. The ID/IG for all films was higher than one, indicating fairly poor film quality associated with the highly entangled CNTs. However, we have extensively used similar films in the past to record from dissociated neurons (Gabay et al. 2007; Shein et al. 2009) and from mouse retina (Shoval et al. 2009) with excellent results. The obtained films are thus very well suited for neuronal stimulation. To validate the durability of the CNT films under mechanical stress, we tested the electrical properties of the CNT films following repeated cycles (up to 30 cycles) of folding and winding. No significant change in film resistivity was identified during or after these manipulations. A major concern when considering the biocompatibility of the CNT electrodes is Ni traces; we therefore tested our CNT electrodes for Ni traces and performed biocompatibility tests by culturing rat cortical cells on the CNT films (according to a previously reported protocol (Shein et al. 2009)). Energy-dispersive x-ray spectroscopy (EDS) tests revealed only very small residues of Ni. Apparently the Ni is effectively embedded in the CNTs and has no adverse effects.
Finally, to reliably measure the electrical resistance of the MWCNT traces, a special testing scheme was implemented. Sets of different-length MWCNT bars (width and height remained constant) were fabricated (Supplementary Fig. 2) with TiN contacts (TiN-CNTs-TiN). The TiN pads are instrumental in achieving reliable ohmic contacts to the CNT films, guaranteeing consistent measurements. It should be noted that while the contact resistance of TiN is substantial, TiN is the conducting material most suitable for CNT growth at the high temperature of the CVD process and is therefore a very convenient material for the film resistance validation discussed here. The current versus voltage trace for each TiN-CNTs-TiN bar was recorded and the electrical resistance was calculated. All samples exhibited ohmic behavior, with values ranging between 2 and 15 kΩ. To derive their sheet resistance, the electrical resistance values were plotted versus the number of squares in each bar (Fig. 2g; for an explanation of the sheet resistance calculation see Supplementary Fig. 3). Values ranging between 160 and 1,850 Ω/□ were obtained for different CVD growth conditions of the MWCNT film. Owing to the high electrode-electrolyte impedance values, we conclude that the CVD grown MWCNT films conduct well enough to be readily used as effective conducting traces for our application.
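To put these numbers in context (the trace geometry here is an assumed example, not a measured device): a conducting trace ten squares long, i.e. ten times longer than it is wide, made from a 500 Ω/□ film contributes R = 10 × 500 Ω = 5 kΩ, which is small compared with the ~55 kΩ electrode impedance quoted above at 1 kHz.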
Extracellular neuronal recording and stimulation using the flexible CNT MEA
Having established the electrical as well as the electrochemical properties of the CNT films, we now turn to describe the electrophysiological performances of the flexible electrodes. An electrode array compatible with a standard multi-electrode array recording and stimulation setup was realized on a printed circuit board (PCB) support (Fig. 3a). The array consists of 16 electrodes on a medical tape support each connected to an external pad. A top PDMS passivation layer, 150 μm thick and with 50 μm diameter holes, was used to define the effective size of the electrodes (Fig. 3a, inset). The flexible array was then mounted onto the PCB carrier to accommodate the link between the electrodes and external amplifiers. A glass cylinder was glued to the PCB support to serve as a well for the physiological medium.
Embryonic chick retina (day 14) was used as a neuronal model. The retina was extracted and transferred to the medium chamber under physiological conditions. The retina was then flattened on the electrode array (Fig. 3a), with the retinal ganglion cell (RGC) layer facing down (as in an epi-retinal implant) and was anchored with a weight. Figure 3b illustrates a circuit model for extracellular recording and stimulation of neuronal tissue using the micro electrode array, depicting the electrochemical interface resistance and the capacitance of the CNT electrode as well as the solution derived shunt capacitance and the stimulation point.
At day 14 the embryonic retina is still at an early developmental stage, and clear spontaneous activity waves were recorded, demonstrating the overall functionality of the device and the setup. We next tested the CNT electrodes' suitability to evoke electrical activity in the retinal tissue. Stimulation was achieved at currents as low as 4 μA (Fig. 4a and b) with a stimulation pulse width of 1 ms. With nearly perfectly capacitive electrodes, these values are well within the limits of safe stimulation. The observed electrical response is typical of pre-synaptic cell activation. Validation of the synaptic processes was achieved with the use of the synaptic blockers 6-cyano-7-nitroquinoxaline-2,3-dione (CNQX) and 2-amino-5-phosphonovaleric acid (APV). 400 s after the introduction of the synaptic blockers, no retinal ganglion cell activation was measured (Fig. 4c).
Discussion and conclusions
We can now turn to look at how our new CNT electrodes rank compared with previously reported technologies. Table 1 summarizes specific DC capacitance, stimulation threshold and SNR values obtained with other CNT and flexible electrode technologies. The table refers to studies that demonstrated either recording or stimulation of neuronal activity. DC capacitance values of 1-10 mF cm−2 were measured for most CNT electrodes on both rigid and flexible substrates. The all-CNT flexible MEA presented in this study is well within this range, with 2 mF cm−2, exceeding both CNT electrodes grown directly on flexible polyimide, with 0.1 mF cm−2 (Hsu et al. 2010; Chen et al. 2011b), and Pt electrodes coated with SWCNTs (drop coating) on a rigid Pyrex substrate, with 4.5·10−6 mF cm−2 (Gabriel et al. 2009). It should be noted that SNR and stimulation threshold values depend on the examined tissue as well as on the size and shape of the electrode. Therefore they cannot be used as a direct measure for comparing MEA devices. The SNR, in particular, provides only a validation of the acceptable performance of the electrodes. A stimulation threshold of 4 nC measured by our flexible CNT MEA is lower than that reported for other CNT MEAs. We have shown that our new flexible all-CNT MEA performances are equivalent to rigid CNT technologies, with the obvious major advantage of being flexible. Two flexible CNT technologies used for neuronal recording and stimulation were reported before. Lin and co-workers fabricated a vertically aligned CNT (VACNT) MEA embedded in a Parylene-C film, while Hsu and co-workers used low temperature CVD (i.e. 400°C) to directly grow a CNT MEA on polyimide (Hsu et al. 2010; Chen et al. 2011b). The specific capacitance of our CNT MEA is significantly higher than that of the directly grown CNTs on polyimide. Compared with both rigid and flexible technologies, our device benefits from the advantages of a very clean and simple fabrication scheme and, most importantly, a seamless integration between the electrode and the circuit, ultimately supporting a reliable and scalable fabrication of state-of-the-art flexible MEAs. To conclude this discussion, our novel flexible CNT electrodes, like other clean carbon based electrodes, are distinguished by having a clearly capacitive nature. Being produced by a simple, clean and robust process, these electrodes' properties surpass previously described technologies.
Notwithstanding these promising results, some improvements in the fabrication scheme are desirable. For example, the 150 μm insulation layer places our electrodes at a significant distance from the tissue, limiting the spatial resolution of the device, as reflected in the relatively low amplitude of the recorded signals (Fig. 4a). Reducing the thickness of the insulation layer will also improve the adhesion of the tissue to the electrodes, further improving the spatial resolution.
To summarize, a new scheme based on CNTs was presented and demonstrated as an advantageous approach to forming high performance neuronal electrode array devices. The electrodes gain their performance from the combination of several different CNT properties. Foremost, CNT films have an extremely large surface area, making them very effective electrochemical electrodes with a capacitive charge injection mechanism. CNTs are also inert and strong, making the electrodes stable in biological conditions. As CNT films can withstand bending, they are very well suited for flexible electronic applications. In the realm of multi-electrode arrays, this feature is particularly important as flexible MEA devices are of great interest for implantable applications. Unlike other coatings that may tend to crack and disconnect from the flexible substrate during bending, CNT films are durable owing to their remarkable mechanical properties and the unique structure of the MWCNT film. The entangled bundles of tubes, forming a dense and continuous yet porous film, make these films particularly well suited for neuronal applications. Additionally, the adhesion between the CNTs and the polymeric substrate is strong, making the CNT film an integrated part of the substrate. Since CNTs are chemically inert they are also durable against corrosion, a very common challenge for conventional metal technology in biological applications. The circuit structure is seamless and all elements (connecting pads, conducting traces and electrodes) are made of CNTs. This is an enormous advantage for both in vivo and in vitro long term use since it eliminates delamination of the coatings and the formation of cracks; such cracks result in leakage currents and failure of the device, as often occurs with layering and connection of different materials. All these properties are added to the relatively simple and robust fabrication process discussed above. This fabrication process can be easily extended to include elements such as photodiodes and allows for stacking of different functionality layers, making the all-carbon-nanotube flexible neural electrodes presented here a promising element in future neuro-prosthetic devices.
[Fig. 4 caption: Electrical recording and stimulation of chick retina with the flexible CNT MEA. a Evoked activity using a biphasic, cathodic-first pulse (arrowhead). The large signal at t=0 is an artifact of the stimulation. b Firing rate of evoked activity at different stimulation intensities (3-10 nC). c Firing rate of evoked activity after application of the synaptic blockers CNQX and APV (stimulation was applied every 10 s). After 400 s no retinal ganglion cell activation is observed.]
|
package at.ac.tuwien.sepm.groupphase.backend.service.impl;
import at.ac.tuwien.sepm.groupphase.backend.endpoint.dto.Filter.MessageFilter;
import at.ac.tuwien.sepm.groupphase.backend.entity.ApplicationUser;
import at.ac.tuwien.sepm.groupphase.backend.entity.Message;
import at.ac.tuwien.sepm.groupphase.backend.entity.Reaction;
import at.ac.tuwien.sepm.groupphase.backend.entity.Spot;
import at.ac.tuwien.sepm.groupphase.backend.exception.NotFoundException;
import at.ac.tuwien.sepm.groupphase.backend.exception.ServiceException;
import at.ac.tuwien.sepm.groupphase.backend.exception.ValidationException;
import at.ac.tuwien.sepm.groupphase.backend.exception.WrongUserException;
import at.ac.tuwien.sepm.groupphase.backend.repository.MessageRepository;
import at.ac.tuwien.sepm.groupphase.backend.repository.ReactionRepository;
import at.ac.tuwien.sepm.groupphase.backend.repository.SpotRepository;
import at.ac.tuwien.sepm.groupphase.backend.repository.UserRepository;
import at.ac.tuwien.sepm.groupphase.backend.service.HashtagService;
import at.ac.tuwien.sepm.groupphase.backend.service.MessageService;
import at.ac.tuwien.sepm.groupphase.backend.service.SpotSubscriptionService;
import at.ac.tuwien.sepm.groupphase.backend.service.validator.MessageValidation;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.data.domain.Page;
import org.springframework.data.domain.Pageable;
import org.springframework.security.core.Authentication;
import org.springframework.security.core.context.SecurityContextHolder;
import org.springframework.stereotype.Service;
import java.time.LocalDateTime;
import java.util.List;
import java.util.Optional;
@Service
@RequiredArgsConstructor
@Slf4j
public class SimpleMessageService implements MessageService {
private final MessageRepository messageRepository;
private final ReactionRepository reactionRepository;
private final SpotRepository spotRepository;
private final HashtagService hashtagService;
private final SpotSubscriptionService spotSubscriptionService;
private final UserRepository userRepository;
@Override
public List<Message> findBySpot(Long spotId) throws NotFoundException {
if (spotRepository.findById(spotId).isEmpty()) {
throw new NotFoundException(String.format("Spot with id %d not found.", spotId));
}
log.debug("Find all messages");
deleteExpiredMessages();
List<Message> messageList = messageRepository.findBySpotIdOrderByPublishedAtAsc(spotId);
messageList.forEach(this::setReactions);
return messageList;
}
@Override
public Page<Message> findBySpotPaged(Long spotId, Pageable pageable) throws NotFoundException {
if (spotRepository.findById(spotId).isEmpty()) {
throw new NotFoundException(String.format("Spot with id %d not found.", spotId));
}
log.debug("Find all messages");
deleteExpiredMessages();
Page<Message> result = messageRepository.findAllBySpotId(spotId, pageable);
result.forEach(this::setReactions);
return result;
}
@Override
public Message create(Message message) throws NotFoundException, ValidationException {
log.debug("create message in spot with id {}", message.getSpot().getId());
MessageValidation.validateNewMessage(message);
Optional<Spot> spot = spotRepository.findById(message.getSpot().getId());
if (spot.isEmpty()) {
throw new NotFoundException("Spot does not Exist");
}
message.setPublishedAt(LocalDateTime.now());
message.setOwner(userRepository.findApplicationUserByEmail(SecurityContextHolder.getContext().getAuthentication().getName()).get());
Message savedMessage = messageRepository.save(message);
hashtagService.acquireHashtags(message);
spotSubscriptionService.dispatchNewMessage(savedMessage);
return savedMessage;
}
@Override
public Message getById(Long id) throws NotFoundException {
log.debug("get message with id {}", id);
deleteExpiredMessages();
Optional<Message> messageOptional = messageRepository.findById(id);
if (messageOptional.isEmpty()) {
throw new NotFoundException("No messages found");
}
Message message = messageOptional.get();
setReactions(message);
return message;
}
@Override
public void deleteById(Long id) throws NotFoundException, WrongUserException, ServiceException {
Optional<Message> messageOptional = messageRepository.findById(id);
if (messageOptional.isEmpty()) {
throw new NotFoundException(String.format("No message with id %d found!", id));
} else if (!messageOptional.get().getOwner().getEmail().equals(SecurityContextHolder.getContext().getAuthentication().getName()) && !SecurityContextHolder.getContext().getAuthentication().getAuthorities().stream().anyMatch(a -> a.getAuthority().equals("ROLE_ADMIN"))) {
throw new WrongUserException("You can only delete your own messages");
}
try {
hashtagService.deleteMessageInHashtags(messageOptional.get());
} catch (ValidationException e) {
throw new ServiceException(e.getMessage());
}
reactionRepository.deleteAllByMessage_Id(id);
messageRepository.deleteById(id);
spotSubscriptionService.dispatchDeletedMessage(messageOptional.get().getSpot().getId(), id);
}
@Override
public void deleteByIdWithoutAuthentication(Long id) throws NotFoundException, ServiceException {
Optional<Message> messageOptional = messageRepository.findById(id);
if (messageOptional.isEmpty()) {
throw new NotFoundException(String.format("No message with id %d found!", id));
}
try {
hashtagService.deleteMessageInHashtags(messageOptional.get());
} catch (ValidationException e) {
throw new ServiceException(e.getMessage());
}
reactionRepository.deleteAllByMessage_Id(id);
messageRepository.deleteById(id);
spotSubscriptionService.dispatchDeletedMessage(messageOptional.get().getSpot().getId(), id);
}
private void setReactions(Message message) {
message.setUpVotes(
reactionRepository.countReactionByMessage_IdAndType(message.getId(), Reaction.ReactionType.THUMBS_UP));
message.setDownVotes(
reactionRepository.countReactionByMessage_IdAndType(message.getId(), Reaction.ReactionType.THUMBS_DOWN));
Authentication auth = SecurityContextHolder.getContext().getAuthentication();
if (auth != null) {
List<Reaction> reaction = reactionRepository.getReactionByOwnerEmail(SecurityContextHolder.getContext().getAuthentication().getName(), message.getId());
if (reaction == null || reaction.size() == 0) {
message.setOwnerReaction(null);
message.setOwnerReactionId(null);
} else if (reaction.get(0).getType().equals(Reaction.ReactionType.THUMBS_DOWN)) {
message.setOwnerReaction(Reaction.ReactionType.THUMBS_DOWN);
message.setOwnerReactionId(reaction.get(0).getId());
} else {
message.setOwnerReaction(Reaction.ReactionType.THUMBS_UP);
message.setOwnerReactionId(reaction.get(0).getId());
}
}
}
@Override
public Page<Message> filter(MessageFilter messageFilter, Pageable pageable) {
log.debug("Searching for messages of spots belonging to the category " + messageFilter.getCategoryId() + ", not older than: " + messageFilter.getTime());
if (messageFilter.getCategoryId() == null) {
messageFilter.setCategoryId(0L);
}
if (messageFilter.getTime() == null) {
messageFilter.setTime(LocalDateTime.MIN);
}
if (messageFilter.getHashtagName() == null || messageFilter.getHashtagName().isEmpty()) {
return messageRepository.filter(messageFilter.getCategoryId(), messageFilter.getUser(), messageFilter.getTime(), pageable);
}
return messageRepository.filterHash(messageFilter.getCategoryId(), messageFilter.getUser(), messageFilter.getTime(), messageFilter.getHashtagName(), pageable);
}
private void deleteExpiredMessages() {
List<Message> deletedExpiredMessages = messageRepository.deleteAllByExpirationDateBefore(LocalDateTime.now());
if (deletedExpiredMessages.size() > 0) {
deletedExpiredMessages.forEach(message -> {
log.info("Deleted expired message with Id {}", message.getId());
});
}
}
@Override
public List<Message> findByOwner(Long userId) throws NotFoundException {
Optional<ApplicationUser> owner = this.userRepository.findById(userId);
if (owner.isPresent()) {
return this.messageRepository.findByOwner(owner.get());
} else {
throw new NotFoundException("User with ID " + userId + " not found.");
}
}
}
|
Influence of the Sun Position and Platform Orientation on the Quality of Imagery Obtained from Unmanned Aerial Vehicles
Images acquired at a low altitude can be the source of accurate information about various environmental phenomena. Often, however, this information is distorted by various factors, so a correction of the images needs to be performed to recreate the actual reflective properties of the imaged area. Due to the low flight altitude, the correction of images from UAVs (unmanned aerial vehicles) is usually limited to the reduction of noise and detector errors. The article shows the influence of the Sun position and platform deviation angles on the quality of images obtained by UAVs. Tilting the camera placed on an unmanned platform leads to incorrect exposure of the imagery, and the magnitude of this distortion depends on the position of the Sun during imaging. An image can be considered in three-dimensional space, where the x and y coordinates determine the position of the pixel and the third dimension determines its exposure. This assumption is the basis for the proposed method of image exposure compensation. A three-dimensional rotation is used to determine the adjustment matrix that corrects the image quality. The adjustments depend on the angles of the platform and the difference between the direction of flight and the position of the Sun. An additional factor regulates the value of the adjustment depending on the ratio of the pitch and roll angles. The experiments were carried out for two sets of data obtained with different unmanned systems. The correction method used can improve the block exposure by up to 60%. The method gives the best results for simple systems not equipped with lighting compensation systems.
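The abstract does not give the exact correction formula, but the idea of treating (x, y, exposure) as points in 3-D space and tilting the exposure plane back by the platform angles can be sketched as follows. This is a minimal illustration only; the particular weighting by the Sun-flight geometry and the form of the adjustment matrix are assumptions, not the authors' published method.

import numpy as np

def compensate_exposure(image, pitch_deg, roll_deg, sun_azimuth_deg, heading_deg):
    """Sketch: remove a planar exposure trend caused by camera tilt.

    `image` is a 2-D array of normalized brightness in [0, 1]. The plane tilted
    by pitch/roll and rotated by the Sun-heading difference is an assumed form.
    """
    h, w = image.shape
    yy, xx = np.meshgrid(np.linspace(-0.5, 0.5, h),
                         np.linspace(-0.5, 0.5, w), indexing="ij")

    rel = np.deg2rad(sun_azimuth_deg - heading_deg)   # Sun relative to flight direction
    trend = (np.tan(np.deg2rad(pitch_deg)) * yy * np.cos(rel)
             + np.tan(np.deg2rad(roll_deg)) * xx * np.sin(rel))

    adjustment = image.mean() * trend                 # adjustment matrix
    return np.clip(image - adjustment, 0.0, 1.0)

# Example: a synthetic flat image acquired with 5 deg pitch and 2 deg roll.
img = np.full((480, 640), 0.5)
corrected = compensate_exposure(img, pitch_deg=5.0, roll_deg=2.0,
                                sun_azimuth_deg=180.0, heading_deg=90.0)
print(corrected.shape, float(corrected.mean()))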
|
def ecdsa_signature_parse_der(der_sig: bytes) -> Secp256k1ECDSASignature:
    # Parse a DER-encoded ECDSA signature into libsecp256k1's internal representation.
    sig = ctypes.create_string_buffer(INTERNAL_SIGNATURE_LENGTH)
    result = lib.secp256k1_ecdsa_signature_parse_der(
        secp256k1_context_verify, sig, der_sig, len(der_sig)
    )
    if result != 1:
        # The library signals failure with a zero return code.
        assert_zero_return_code(result)
        raise Libsecp256k1Exception("signature could not be parsed")
    return sig
|
/**
 * Recalculate player actions (movement and shooting). Called from recalculate().
 * @param frameTime frame time, in seconds.
 */
private void recalculatePlayerActions(double frameTime) {
    // Horizontal movement: move only when exactly one of the arrow keys is held.
    boolean leftKeyDown = this.getKeyState(KeyEvent.VK_LEFT);
    boolean rightKeyDown = this.getKeyState(KeyEvent.VK_RIGHT);
    if (leftKeyDown && !rightKeyDown) {
        this.player.move(-1, frameTime);
        this.hasChanged = true;
    } else if (!leftKeyDown && rightKeyDown) {
        this.player.move(1, frameTime);
        this.hasChanged = true;
    }

    // Shooting: only one player projectile may exist at a time.
    boolean spaceKeyDown = this.getKeyState(KeyEvent.VK_SPACE);
    if (this.playerProjectile == null && spaceKeyDown) {
        this.playerProjectile = new Projectile(this.player.getPosition(), 0.9);
        this.hasChanged = true;
    }

    // Advance the projectile and discard it once it is marked invalid.
    if (this.playerProjectile != null) {
        this.playerProjectile.move(frameTime);
        if (this.playerProjectile.isInvalid()) {
            this.playerProjectile = null;
        }
        this.hasChanged = true;
    }
}
|
Every year they get closer: UFO fan hails clearest video yet and says 'they know I'm filming them'
It seems to be the world centre for UFO videos, but this image is extraordinarily clear.
UFO enthusiast Michael Lee Hill, of Eastlake, has recorded many images of something mysterious over Lake Erie, Ohio.
He has just released new footage filmed along the lakefront that he says is his best so far.
Scroll down for video
Clear and not moving: The bright object filmed over Lake Erie in Ohio
Mr Hill said: 'It was probably one of the craziest nights of activity I've ever experienced.'
The images show several sets of coloured lights flashing over Lake Erie, while appearing to remain still.
Mr Hill said: 'I've been filming these for a long time and it's just grown to the point where they've let me know, they know I'm sittin' there filming them.
'You know, they come in - every year they've come in closer and closer.'
Bizarre claim: UFO enthusiast Michael Lee Hill believes the objects in his films are aware that he's filming them
Several people in the area believe the lake is a hotbed for extraterrestrial activity.
Ed Husa lives in Euclid and often walks his dog in a local park.
'I go outside and I look up in the sky and I see them all the time up there.
'Every so often you see strange lights blinking on and off.'
Fox 8 News revealed similar images of lights blinking over Lake Erie last year.
That led a team from the Cleveland Ufology Project to track the lights.
Hanging in there: The object shines brightly as it sits motionless above the lake
The organisation was founded back in 1952. Its members consider themselves experts and decided that the images were airplanes.
Hill disagreed with their assessment last year and now he says he's even more confident the aliens are among us.
'The fact that this remains in the screen and is not moving, I believe, negates any notion that it's an aircraft,' he said.
NASA, which has a research facility in Cleveland, has not previously commented on the sightings over Lake Erie. Officials at Hopkins International Airport said planes from the U.S. and Canada take off frequently over the water.
|
package model

type CodexSubmission struct {
	QuestionID string          `json:"question_id"`
	SourceCode string          `json:"source_code"`
	Language   string          `json:"language"`
	TimeLimit  string          `json:"time_limit"`
	TestCases  []*SandboxStdin `json:"test_cases"`
}

type SandboxStdin struct {
	ID             string `json:"id"`
	Input          string `json:"input"`
	ExpectedOutput string `json:"expected_output"`
	IsPrivate      bool   `json:"is_private"`
}

type SandboxStdout struct {
	TestCaseID string      `json:"test_case_id"`
	Status     string      `json:"status"`
	Log        *SandboxLog `json:"log"`
	Error      error       `json:"error"`
}

type SandboxTestStdout struct {
	TestCaseID string                  `json:"test_case_id"`
	Status     string                  `json:"status"`
	Log        *SandboxLog             `json:"log"`
	Error      *SandboxTestStdoutError `json:"error"`
}

type SandboxTestStdoutError struct {
	Message string `json:"message"`
}

type SandboxLog struct {
	ID             int    `json:"id"`
	Stdin          string `json:"stdin"`
	Stdout         string `json:"stdout"`
	Stderr         string `json:"stderr"`
	RunningTime    string `json:"running_time"`
	ExpectedStdout string `json:"expected_stdout"`
}
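For illustration, a submission built from these types marshals with the JSON field names declared in the struct tags above; the concrete values below are made up.

// Illustrative only (requires importing "encoding/json" and "fmt").
func ExampleCodexSubmissionJSON() {
	sub := CodexSubmission{
		QuestionID: "q-42",
		SourceCode: "print(input())",
		Language:   "python3",
		TimeLimit:  "2s",
		TestCases: []*SandboxStdin{
			{ID: "tc-1", Input: "hello", ExpectedOutput: "hello", IsPrivate: false},
		},
	}
	b, _ := json.MarshalIndent(sub, "", "  ")
	fmt.Println(string(b)) // keys: question_id, source_code, language, time_limit, test_cases
}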
|
# For each of the M given pairs (A, B), the element with the smaller H value
# (or both elements, on a tie) is marked as "not good"; count how many stay good.
N, M = map(int, input().split())
H = list(map(int, input().split()))

# OBS[i] == 1 means element i is still considered good.
OBS = [1] * N

for _ in range(M):
    A, B = map(int, input().split())
    if H[A - 1] > H[B - 1]:
        OBS[B - 1] = 0
    elif H[A - 1] < H[B - 1]:
        OBS[A - 1] = 0
    else:
        # Equal heights: neither endpoint can be good.
        OBS[A - 1] = 0
        OBS[B - 1] = 0

# Example: N=4, M=3, H=[1, 2, 3, 4], pairs (1,3), (2,3), (2,4) -> prints 2.
print(sum(OBS))
|
# Sort the array, then swap adjacent pairs so the smaller of each pair sits
# between two larger values; count how many positions end up strictly smaller
# than both of their neighbours, and print that count plus the arrangement.
from sys import stdin, stdout

n = int(stdin.readline())
arr = list(map(int, stdin.readline().split()))
arr.sort()

for i in range(1, n - 1, 2):
    arr[i], arr[i - 1] = arr[i - 1], arr[i]

ans = 0
for i in range(1, n - 1, 2):
    if arr[i] < min(arr[i - 1], arr[i + 1]):
        ans += 1

stdout.write("{}\n".format(ans))
stdout.write(" ".join(map(str, arr)))
|
#include <stdio.h>

#define N 166666

/* Return x if it is larger than y, otherwise y. */
static long max_long(long x, long y)
{
    return x > y ? x : y;
}

/* Length of the longest "good" segment: a segment is good when every element,
 * starting from its third one, equals the sum of the two elements before it. */
static long len_of_good_segment(const long long a[], long n)
{
    long count = 2, ret = 2, i;
    for (i = 2; i < n; i++) {
        if (a[i] == a[i - 1] + a[i - 2])
            count++;
        else
            count = 2;
        ret = max_long(ret, count);
    }
    return ret;
}

int main(void)
{
    /* static, so the large array is not placed on the stack */
    static long long a[N];
    long n, i;

    scanf("%ld", &n);
    for (i = 0; i < n; i++)
        scanf("%lld", &a[i]);   /* %lld is the portable format for long long */

    if (n == 1)
        printf("1");
    else if (n == 2)
        printf("2");
    else
        printf("%ld", len_of_good_segment(a, n));
    return 0;
}
|
/**
 * internal use only, set current inner context
 * @param newCurrentInnerContext current inner context
 */
public static void internal_assignCurrentInnerContext(GrouperContext newCurrentInnerContext) {
    if (newCurrentInnerContext == null) {
        currentInnerContext.remove();
    } else {
        currentInnerContext.set(newCurrentInnerContext);
    }
}
|
Hadas Gold interviews Michelle Fields and her boyfriend Jamie Weinstein, the Daily Caller editor who began the public conversation about an incident with Corey Lewandowski by tweeting “Trump always surrounds himself [with] thugs.”
From Politico:
Michelle Fields says she wouldn’t change how she has handled the events of the past month or so. She only wishes she could go back in time and decline the request to cover a Donald Trump news conference in place of a colleague who was sick.
Then, she told POLITICO over drinks at a Washington, D.C. hotel bar, she would never have come into contact with Trump campaign manager Corey Lewandowski, and she would still have a job she loved as a reporter for the conservative website Breitbart.
Instead, she said, she has had to leave that job, move from her apartment because of threats and handle the publicity that comes with challenging a presidential candidate’s campaign machine known for never saying sorry or backing down.
At that March 8 news conference at the Trump National Golf Club in Jupiter, Florida, Fields tried to get Trump’s attention to ask a question about affirmative action as he walked through the crowded ballroom toward the exit, surrounded by Secret Service. But as she asked her question, she was almost immediately moved out of the way by Lewandowski — with enough force to leave bruises on her arm, which she later documented with photos.
Her boyfriend, Daily Caller senior editor Jamie Weinstein, who joined Fields for her interview with POLITICO and has been public about his dislike of Trump, quickly tweeted a message, calling Lewandowski a “thug” and saying that he almost brought Fields down to the ground after she asked a question.
“I thought the best way to force an apology or some contrition was by making it public,” Weinstein told POLITICO.
Fields jumped in here, saying she doesn’t regret that Weinstein tweeted about the incident because ultimately, through the entire drama, it helped “reveal the character of [Lewandowski] and the Trump campaign.”
And that tweet is where the drama and the media frenzy began. Fields placed most of the blame for the escalation of events on the Trump campaign, for changing its story and questioning her character. She insisted that all she ever wanted was an apology, and that she went to the police just to prove she was not lying. She said the media narrative got wrapped up in litigating the details of the case (how hard she was actually pulled, why such a simple act was considered battery, whether she touched Trump first) versus how the campaign was handling what could otherwise have been a quickly resolved situation. And she questioned the tactics and motives of the Florida state attorneys who brought charges against Lewandowski but ultimately declined to pursue a case.
Read the rest of the story here.
|
from sys import stdin
from heapq import *

# Query types:
#   1 p -- a new customer arrives willing to pay p
#   2   -- serve the customer who arrived earliest (smallest id)
#   3   -- serve the waiting customer who pays the most (ties: earliest arrival)
Q = int(stdin.readline())
customer_id = 1
pq = []            # max-heap by price, stored as [-price, id]
first = 1          # smallest customer id not yet served
ans = []
served = set()
for q in range(Q):
    args = stdin.readline().split()
    if args[0] == "1":
        heappush(pq, [-1*int(args[1]), customer_id])
        customer_id += 1
    elif args[0] == "2":
        ans.append(first)
        served.add(first)
        while first in served: first += 1
    else:
        # discard customers already served by a type-2 query
        while pq[0][1] in served: heappop(pq)
        price, c_id = heappop(pq)
        ans.append(c_id)
        served.add(c_id)
        while first in served: first += 1
print(*ans)
|
def remove_instance(self, instance, delete_instance=False,
ignore_missing=True):
instance = self._get_resource(_instance.Instance, instance)
return instance.remove(self._session,
delete_instance=delete_instance,
ignore_missing=ignore_missing)
|
def message_parse(self, message, save_original=False):
    """Parse an email.message.Message into a dict of values (message_id,
    subject, author, recipients, date, parent thread, payload and bounce
    information) used to create a mail.message record."""
if not isinstance(message, Message):
raise ValueError(_('Message should be a valid Message instance'))
msg_dict = {'message_type': 'email'}
message_id = message.get('Message-Id')
if not message_id:
message_id = "<%s@localhost>" % time.time()
_logger.debug('Parsing Message without message-id, generating a random one: %s', message_id)
msg_dict['message_id'] = message_id.strip()
if message.get('Subject'):
msg_dict['subject'] = tools.decode_message_header(message, 'Subject')
email_from = tools.decode_message_header(message, 'From', quoted=True)
email_cc = tools.decode_message_header(message, 'cc', quoted=True)
email_from_list = tools.email_split_and_format(email_from)
email_cc_list = tools.email_split_and_format(email_cc)
msg_dict['email_from'] = email_from_list[0] if email_from_list else email_from
msg_dict['from'] = msg_dict['email_from']
msg_dict['cc'] = ','.join(email_cc_list) if email_cc_list else email_cc
msg_dict['recipients'] = ','.join(set(formatted_email
for address in [
tools.decode_message_header(message, 'Delivered-To', quoted=True),
tools.decode_message_header(message, 'To', quoted=True),
tools.decode_message_header(message, 'Cc', quoted=True),
tools.decode_message_header(message, 'Resent-To', quoted=True),
tools.decode_message_header(message, 'Resent-Cc', quoted=True)
] if address
for formatted_email in tools.email_split_and_format(address))
)
msg_dict['to'] = ','.join(set(formatted_email
for address in [
tools.decode_message_header(message, 'Delivered-To', quoted=True),
tools.decode_message_header(message, 'To', quoted=True)
] if address
for formatted_email in tools.email_split_and_format(address))
)
partner_ids = [x.id for x in self._mail_find_partner_from_emails(tools.email_split(msg_dict['recipients']), records=self) if x]
msg_dict['partner_ids'] = partner_ids
msg_dict['references'] = tools.decode_message_header(message, 'References')
msg_dict['in_reply_to'] = tools.decode_message_header(message, 'In-Reply-To').strip()
if message.get('Date'):
try:
date_hdr = tools.decode_message_header(message, 'Date')
parsed_date = dateutil.parser.parse(date_hdr, fuzzy=True)
if parsed_date.utcoffset() is None:
stored_date = parsed_date.replace(tzinfo=pytz.utc)
else:
stored_date = parsed_date.astimezone(tz=pytz.utc)
except Exception:
_logger.info('Failed to parse Date header %r in incoming mail '
'with message-id %r, assuming current date/time.',
message.get('Date'), message_id)
stored_date = datetime.datetime.now()
msg_dict['date'] = stored_date.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)
if msg_dict['in_reply_to']:
parent_ids = self.env['mail.message'].search([('message_id', '=', msg_dict['in_reply_to'])], limit=1)
if parent_ids:
msg_dict['parent_id'] = parent_ids.id
msg_dict['internal'] = parent_ids.subtype_id and parent_ids.subtype_id.internal or False
if msg_dict['references'] and 'parent_id' not in msg_dict:
references_msg_id_list = tools.mail_header_msgid_re.findall(msg_dict['references'])
parent_ids = self.env['mail.message'].search([('message_id', 'in', [x.strip() for x in references_msg_id_list])], limit=1)
if parent_ids:
msg_dict['parent_id'] = parent_ids.id
msg_dict['internal'] = parent_ids.subtype_id and parent_ids.subtype_id.internal or False
msg_dict.update(self._message_parse_extract_payload(message, save_original=save_original))
msg_dict.update(self._message_parse_extract_bounce(message, msg_dict))
return msg_dict
|
The way Educators teach and the way students learn has changed dramatically over the last several years. Instead of assigning worksheets, teachers are now assigning online projects that bring a richer environment of learning to the classroom. Instead of students putting together a folder with all their work and assignments to be shared with one person at a time, students are now creating e-portfolios to be shared with hundreds of people at once via social media.
Education has gone digital. Students and teachers can no longer expect paper to be the only source of exchange for learning and understanding. To accommodate this ever evolving world of information, teachers and students both need an online tool where they can collect, share, reflect, and grow from their learning.
This is where a tool like LiveBinders comes in. LiveBinders is your digital binder for all of your online content and learning.
The aim of this post is to provide all the information you need to learn how to use LiveBinders effectively for yourself and with your students.
Click on a link below to go to the section you want to read:
Introduction – What are LiveBinders?
LiveBinders is your digital binder for all of your online content and learning.
According to LiveBinders.com:
“Physical 3-ring binders used to be the easiest way to organize all your educational resources. But now so much of what you want to organize is online. LiveBinders not only replaces the old 3-ring binder, but also opens up new opportunities for collaborating, organizing, and sharing that were never possible before.”
With your LiveBinder, you can create a central hub full of resources on a topic that you choose. Most everything that you can think of can be added to your LiveBinder – links to websites, YouTube videos, Flickr images, PDF/Microsoft Word documents, and QR codes are just some examples of rich content for your LiveBinder.
Physical binders typically do not leave the classroom or ever come out of the backpack. Sharing is limited to those the student can physically hand the binder to. LiveBinders, on the other hand, allows students to share their binders via Twitter, Facebook, Google+, Plurk, or email. Students can also embed their binders on their blogs or websites. Their binders can go well beyond the walls of the classroom and reach hundreds of people who never would have seen their learning if it was confined to a physical binder.
Students can also comment on other students’ binders and rate their LiveBinder as “useful.” If students are curious about how many people have actually viewed their LiveBinder, LiveBinders provides a view count on the LiveBinder itself. Students can learn and grow from these experiences.
If you have ever wanted a binder enhanced with these tools and features, you can get started today. For a free basic account, click here to be directed to the sign-up page.
Back to Top
About the Educator’s Guide to LiveBinders
This guide is directed solely towards you, the teacher.
Starting with something new can always be a bit tricky at first. It is completely natural and expected that you will have questions throughout this learning process. This is why the “Educator’s Guide to LiveBinders” was created. With the resources that this guide will provide, the goal is to make your progress with LiveBinders a smooth and streamlined learning phase so you can be confident about how you are using LiveBinders for your own purposes and with your students.
Each section of this guide was carefully considered and thought through to offer the best guide to LiveBinders for educators. Being an educator takes dedication. You are constantly perfecting your skills in lesson planning and the effectiveness of your materials to assure that each one of your students has the best available resources to learn. Rest assured that the same amount of dedication and effort that you put into your profession has been put into this guide.
Back to Top
Why Use LiveBinders?
LiveBinders can be your one central source to collect resources, share resources, and collaborate with other educators and technology enthusiasts. You can collect and curate resources that include: web sites, documents, photos and many other things in your LiveBinder.
In today’s classroom, students and teachers use the power of the internet to gather resources for projects, resources for papers, and resources to share in the classroom.
Here are some of my favorite ideas for students using LiveBinders:
Collect/organize blog posts – their own or their classmates’
Create a “My Evernote” tab – students can insert links to certain Evernote notes
Research – students can use the “LiveBinder It” browser extension to add a website link to their binder
Comment on fellow classmates’ binders – LiveBinders allows you to add comments to a binder, which is great for peer review.
Upload lecture notes – whether it be an Evernote note link or an MS Word doc, students can upload/insert their class lecture notes.
There are multiple reasons to use LiveBinders in and out of the classroom. It’s free and very easy to use. The creators of LiveBinders, Tina and Barbara (@livebinders), provide the best support I’ve ever experienced. They love to share others’ LiveBinders and love to hear how what they created is making a difference.
Getting Started
Before you can start using LiveBinders in your classroom, you’ll want to create a free LiveBinders account. Signing up is quick and simple!
Back to Top
Step 1: Creating An Account
First, head to the LiveBinders website at www.livebinders.com. On the homepage, you’ll see the “Sign Up” link at the top right hand corner, see below:
On the next page, you’ll see the form to sign up for your LiveBinders account.
Simply fill out the information and click on “sign up now.”
Since this will be your personal account, leave the question “Student signing up with teacher’s email address?” marked as “no”:
Your initial account will be setup as a “Free” account.
If you so choose, there are two other options to upgrade your account depending on your needs:
If you are using LiveBinders as an educator, you can upgrade at a discounted price.
Besides larger file upload sizes and storage, teachers can upgrade for access to more powerful organizational tools such as a 3rd row of tabs.
Teachers who are looking for more security around their binder content will want to purchase the upgrade. This will keep any privately marked uploaded files tied to the private binder. Subscribers can email links from within LiveBinders and can create more than 10 binders. Any new premium features will be made available automatically to subscribers.
Back to Top
Step 2: Creating Student LiveBinders Accounts
Setting up student LiveBinder accounts is a breeze!
Have students go to www.livebinders.com.
Then have them click on “sign up” at the top of the page.
Once they get to the signup page, have them click on “yes” where it says “student signing up with teacher’s email address”:
Once they click on “yes”, they will get two different boxes – “Teacher’s Email Address” and “Please Retype Teacher’s Email Address.”
At this point, they can enter the teacher’s email address instead of their own. You’ll want to do this if you have any students under the age of 13.
Of course, please refer to the LiveBinders “Privacy Policy” prior to walking your students through the registration process.
Back to Top
Step 3: The “LiveBinder It” Tool and LiveBinders iOS/Android/Chrome Apps
Now that you are signed up for your LiveBinders account, you’ll want to gear up with the great tools and apps that LiveBinders offers.
To go to the LiveBinders tools page, click here.
The “LiveBinder It” Tool
LiveBinders offers an outstanding bookmarklet that makes adding content to LiveBinder a breeze, called the “LiveBinder It” bookmarklet.
To install the bookmarklet, simply go to the LiveBinders tools page or click here.
Once you run across a website that you would like to add to your LiveBinder, simply click the “LiveBinder It” button on your bookmarks bar in your browser:
Once you have clicked on the “LiveBinder It” tool, you’ll see the following box pop up with the following information: Website URL, Tab title, Comments, and Choose your Binder information:
The “Website URL” is the link to the website that you are about to add to your LiveBinder. The “Tab title” is where you can label your tab. By default, the title is taken from the website’s heading, but you can edit it here if you want to call your tab a different name.
In the “Comment” section, you have the option of adding a heading, separate from the title.
In the “Choose Your Binder” section, you can add the content to an existing binder or to create a new binder. If you are adding to an existing binder you can select whether to add the content as a “Main Tab” or a “Subtab”.
Once you have made your selections on the “LiveBinder It” screen, your content is automatically added to your LiveBinder. This is a great way to add websites to your binder without having to open the binder:
The “LiveBinder It” tool is available for Google Chrome, Safari, Internet Explorer, and Firefox.
iPad/iPhone App
The iPad/iPhone LiveBinders app, available in the iTunes store, provides on-the-go access to your LiveBinders whenever you need it.
With the iPad app, users can do the following:
1. Log into your account to get access to all of your binders and custom shelves.
2. Add iPad images directly into your binders and re-size those images. (This is useful for teachers who are documenting their classroom and want to add their photographs to their binders.)
3. Directly add files to your binders from your own Evernote and Dropbox accounts
4. Word files can be automatically viewed as PDFs in a binder tab
5. Users can create new binders using the “create binder” icon
6. Users can add uploaded files using the “My Uploads” page
More features are being added in Fall 2014.
To go to the LiveBinders iPad app page in iTunes, click here.
With the iPhone app, users can do the following:
1. Non LiveBinder users can download the LiveBinders app and get access to teacher and colleague binders without having to log in. (This has been used by many classrooms where students did not have LiveBinders accounts but needed access to the teacher’s homework binders from their iPhone on a daily basis.)
2. Users can log into their account to view all of their binders and custom shelves.
3. Users can search for binders, view Featured binders and Education Category binders.
4. A “History” tab lets users keep track of the binders they’ve recently viewed without having to rely on rescanning or searching for the binder again. This is useful for users who do not have a LiveBinders account.
5. A QR code reader lets users view binders from a QR code.
More features are being added in the Fall of 2014.
To install the LiveBinders iPhone app, click here.
Android App
LiveBinders also offers an Android app for when you are on the go. The Android app allows you to launch www.livebinders.com, adds the “LiveBinder It” bookmarklet to your “share” feature via the Google Chrome web browser, and allows you to upload photos to your binder from your phone’s Gallery through the “share” option as well:
Chrome App
LiveBinders also provides a Chrome app via the Chrome app store. With the LiveBinders Chrome app, you can have a shortcut to www.livebinders.com and your LiveBinders page.
Just click on the app and you’re there!
Back to Top
Step 4: Your First LiveBinder
Now that you have signed up for your LiveBinder account, it’s time to get hands on with your first LiveBinder!
When you sign into your account, you’ll see a LiveBinder labeled “Your First Binder”:
This binder is full of excellent resources for you to get started with. It’s a good idea to go through the content on this binder to start off with. This will also let you become familiar with how a LiveBinder is organized and setup:
Back to Top
Step 5: Starting a Blank Binder
Starting a blank binder is quick and easy! Here’s a great video from Matt Bergman on creating a new Livebinder:
If you want to start a blank LiveBinder, simply go to your homepage and click on “Start a Blank Binder” on the Dashboard menu on the left:
Once you have selected to start a blank binder, you’ll be prompted for the following information:
This is where you will create the name of your binder, provide a description, add “tags” to your binder, select the category (i.e. personal, education, etc), select whether you want your binder to be “public” or “private”, and assign an “Access Key” if you would like. More on these settings later.
Back to Top
Step 6: Adding Content To Your LiveBinder
Now that you have your new LiveBinder created, you can choose to add web content via the “LiveBinder It” bookmarklet or you can add content such as: Microsoft Office (Word, Excel, Powerpoint) documents, PDF documents, Flickr images, Youtube videos, QR codes, Embed codes, Dropbox links* and Delicious links.
Note:
If you choose to add content from your Dropbox account, you’ll receive the following message due to a change in policy from Dropbox: “Dropbox has changed their policy on shared links and any new Dropbox links to pdf documents will automatically download when you click on a tab with that Dropbox link. Images and video files will continue to show up inside the tab. You can upload your pdf files to LiveBinders if you want them to appear inside the binder.”
Whenever you want to edit your binder, you can select “edit this binder” from within the binder by clicking on the little binder icon in the upper right. You can also trigger edit mode from your “My Binders” page by clicking on the “edit” button underneath your binder icon:
Once you enter edit mode, you’ll see the binder “control panel” on the top left hand corner. On the binder control panel, you’ll notice a tab that says “Add Content”:
Once you enter the “Add Content” menu, you’ll be prompted with different selections:
You have two different options to add content via this menu: “Upload a File” and “Add other content.”
Here’s a quick video review of how to upload files into your Livebinder:
The “Upload a File” option allows you to add documents such as Microsoft Office docs and PDF files*:
Note:
If you have a free basic account, you will not have the option to mark your files as “private.” If you want to upload your document securely, you’ll want to upgrade your account.
You also have the option of uploading additional content such as Flickr images and Youtube videos via the “Add Other Content”:
Back to Top
Organizing Your LiveBinder
The more content that you add to your LiveBinder the more it will become necessary to organize your binder. With LiveBinders, you can organize your content with “Tabs” and “Sub tabs.”
Here’s a great example of what the Tabs and Sub tabs look like in Joy Kirr’s (@joykirr) “Genius Hour” LiveBinder (click here to see her LiveBinder):
Each “tab” can be labeled with whatever title you prefer.
To change the name of a tab, simply click on it while you are in Edit mode and type the new name. To add multiple items to a tab, you will want to create “sub tabs.”
You can also organize the content of the tabs themselves under the binder control panel in the edit mode. When in edit mode, simply click on the tab that you would like to change the settings on and then click on “Tab Settings” on the binder control panel:
Once you select “Tab settings”, it will bring up the tab settings menu.
This menu will give you several different options to choose from to customize your tabs including tab colors.
To change the text layout of the tabs, click on “Set Current Tab Text Layout”:
This option allows you to change the layout of the tab:
Changing the layout of the tab will change how the tab will look like whenever the viewer clicks on that tab. For example, if you choose the option “Media-Text,” you would have a finished product with the same layout as Rhonda Phillips’ (@contentliteracy) LiveBinder “Content Literacy” (click here to see her LiveBinder).
Back to Top
LiveBinders And Bloom’s Taxonomy
Bloom’s Taxonomy is a great guideline to use when preparing lessons and activities. Bloom’s Taxonomy covers 6 stages of learning: Remembering, Understanding, Applying, Analyzing, Evaluating and Creating.
With LiveBinders, you can cover all six stages of learning. Here’s a Bloom’s Taxonomy wheel for LiveBinders:
Back to Top
Using LiveBinders As E-portfolios
“Online portfolios are quickly becoming an important reflection of a student’s progress and a great tool to help them achieve future goals.” (LiveBinders.com)
Growth and reflection are two important goals to have in the learning environment. As students advance their learning, it will become increasingly important for students (and teachers) to create their own portfolios as evidence of what they have accomplished along the way.
With LiveBinders, students can create that portfolio to build and reflect upon what they have done.
Back to Top
Teacher Examples
Dean Mantz (@dmantz7) has an excellent binder that he has put together as his E-portfolio, reflecting his learning and professional work, “Dean Mantz – eportfolio” (click here to view his LiveBinder):
Mickie Mueller (@mickie_mueller) also has a great E-portfolio using a LiveBinder to reflect and share her learning and work, “Mickie Mueller’s Portfolio” (click here to view her LiveBinder):
For students in the classroom, using LiveBinders as E-portfolios is an excellent way to showcase their work and to build, collaborate, and reflect upon what they have accomplished.
Back to Top
Student/School Examples
When students create their own personal LiveBinder, they can make it their own. Anything they create such as: photos, documents, YouTube videos, audio files, and links to their own blog or blog posts can be added to their LiveBinder.
Here is a great example of how students can utilize LiveBinders to make their own E-portfolios by Emily Waller, “Music Tech Portfolio” (click here to see her LiveBinder):
Using LiveBinders as an E-Portfolio has become so popular that some schools have started to create LiveBinder E-Portfolio templates for students to use. Buhler High School has provided its students with an outstanding LiveBinder template to get started with, “Buhler High School Electronic Portfolio Template” (click here to see the full LiveBinder):
The LiveBinder that Buhler High School has created provides tabs for each subject/class their student takes.
Providing a template like this gives students an excellent example of how a LiveBinder should be organized and maintained.
Back to Top
Sharing E-portfolios
Once you have created your E-portfolio, you can share the link to your binder via social media, email, or embed it on your website or blog!
To share your LiveBinder, simply open your LiveBinder and hover your cursor over the “share” link:
Back to Top
Embedding Binders And Binder Icons
If you wanted to share your LiveBinder on your blog or website, click on the “Link or Embed” option under the “share” link:
The embed option allows you to embed your LiveBinder on your blog or website, showing it either as just the “Binder Icon” or as an “Open Binder.” When you embed your LiveBinder as a “Binder Icon,” your LiveBinder will appear just as it does on livebinders.com.
If you have multiple LiveBinders, you can create a “shelf” that includes all the LiveBinders that you would like to add to it.
When you create a “shelf” via the LiveBinders website, you can then share that shelf just as you would a single LiveBinder.
Here’s a great example of a shared LiveBinder shelf from Joy Kirr (@joykirr) on her website (click here to visit her website):
Back to Top
Using LiveBinders for Lesson Planning
When gathering resources for your lesson plans, there is a plethora of websites where you can find information and activities for your topic. One resource, however, that you may not have thought about is the LiveBinders website itself, www.livebinders.com. The LiveBinders on livebinders.com are made by some of the most inspiring and engaged educators in the education world today. If you want resources for your lesson plans, go to livebinders.com!
On the featured page, you’ll notice a navigation bar on the right hand side of the screen where you can narrow down your search by subject.
Needing lesson plan resources for STEM? Here’s a peek at what LiveBinders has to offer for STEM resources:
The LiveBinder you see in the example, “100+ STEM Websites & Webtools for Teachers,” created by Tia Cooper (@TechwithTia), is an excellent binder to get started with.
Here’s a peek at her LiveBinder (click here to see the full LiveBinder):
Looking for apps you can use with your Special Needs students? Here’s a great collection of apps for kids with Autism:
The LiveBinder you see in the example, “Apps for kids with Autism,” created by Toby Price (@jedipadmaster), is an outstanding resource.
Here’s a peek at his LiveBinder (click here to see the full LiveBinder):
Back to Top
Conclusion
I cannot speak highly enough of LiveBinders. This guide is just the beginning of your LiveBinder journey with your students. In the end, what matters most is the learning experiences you and your students will have.
Use LiveBinders to its fullest potential and beyond. Engage your students by letting them create their own E-portfolios, share their LiveBinders on social media or on their blogs, and comment on and evaluate their classmates’ binders. Create a central hub for all of your resources so you don’t have to juggle several other websites, and keep updating your LiveBinder as often as you can, because your learning never stops. Not only are you an educator, you are a life-long learner. Learn, share, reflect and grow with LiveBinders.
Should you ever need assistance, there are people here to help. Feel free to contact me on Twitter via my handle, @justinstallings, or contact LiveBinders support via their handle, @LiveBinders. If you need further assistance with your account, LiveBinders also has a support page on the LiveBinders website: http://www.livebinders.com/help.
A very special thanks to Barbara Tallent, Tina Schneider, Alex Quelle and Dana Brito for their assistance with this guide. This guide would not have been possible had it not been for you.
|
// This is the single constructor for all Delegates. The compiler
// doesn't provide an implementation of the Delegate constructor. We
// provide that implementation through an ECall call to this method.
FCIMPL3(void, COMDelegate::DelegateConstruct, Object* refThisUNSAFE, Object* targetUNSAFE, PCODE method)
{
FCALL_CONTRACT;
struct _gc
{
DELEGATEREF refThis;
OBJECTREF target;
} gc;
gc.refThis = (DELEGATEREF) ObjectToOBJECTREF(refThisUNSAFE);
gc.target = (OBJECTREF) targetUNSAFE;
HELPER_METHOD_FRAME_BEGIN_PROTECT(gc);
if (method == NULL)
COMPlusThrowArgumentNull(W("method"));
_ASSERTE(gc.refThis);
_ASSERTE(method);
_ASSERTE(isMemoryReadable(method, 1));
MethodTable *pMTTarg = NULL;
if (gc.target != NULL)
{
pMTTarg = gc.target->GetMethodTable();
}
MethodDesc *pMethOrig = Entry2MethodDesc(method, pMTTarg);
MethodDesc *pMeth = pMethOrig;
MethodTable* pDelMT = gc.refThis->GetMethodTable();
LOG((LF_STUBS, LL_INFO1000, "In DelegateConstruct: for delegate type %s binding to method %s::%s%s, static = %d\n",
pDelMT->GetDebugClassName(),
pMeth->m_pszDebugClassName, pMeth->m_pszDebugMethodName, pMeth->m_pszDebugMethodSignature, pMeth->IsStatic()));
_ASSERTE(pMeth);
#ifdef _DEBUG
MethodTable* p = pMeth->GetMethodTable();
_ASSERTE(p);
_ASSERTE(p->ValidateWithPossibleAV());
#endif
if (Nullable::IsNullableType(pMeth->GetMethodTable()))
COMPlusThrow(kNotSupportedException);
DelegateEEClass *pDelCls = (DelegateEEClass*)pDelMT->GetClass();
MethodDesc *pDelegateInvoke = COMDelegate::FindDelegateInvokeMethod(pDelMT);
MetaSig invokeSig(pDelegateInvoke);
MetaSig methodSig(pMeth);
UINT invokeArgCount = invokeSig.NumFixedArgs();
UINT methodArgCount = methodSig.NumFixedArgs();
BOOL isStatic = pMeth->IsStatic();
if (!isStatic)
{
methodArgCount++;
}
if (NeedsWrapperDelegate(pMeth))
gc.refThis = CreateWrapperDelegate(gc.refThis, pMeth);
if (pMeth->GetLoaderAllocator()->IsCollectible())
gc.refThis->SetMethodBase(pMeth->GetLoaderAllocator()->GetExposedObject());
if (invokeArgCount == methodArgCount)
{
gc.refThis->SetTarget(gc.refThis);
Stub *pShuffleThunk = NULL;
if (!pMeth->IsStatic() && pMeth->HasRetBuffArg() && IsRetBuffPassedAsFirstArg())
pShuffleThunk = pDelCls->m_pInstRetBuffCallStub;
else
pShuffleThunk = pDelCls->m_pStaticCallStub;
if (!pShuffleThunk)
pShuffleThunk = SetupShuffleThunk(pDelMT, pMeth);
gc.refThis->SetMethodPtr(pShuffleThunk->GetEntryPoint());
if (!pMeth->IsStatic() && pMeth->IsVirtual() && !pMeth->GetMethodTable()->IsValueType())
{
PCODE pTargetCall = GetVirtualCallStub(pMeth, TypeHandle(pMeth->GetMethodTable()));
gc.refThis->SetMethodPtrAux(pTargetCall);
gc.refThis->SetInvocationCount((INT_PTR)(void *)pMeth);
}
else
{
gc.refThis->SetMethodPtrAux(method);
}
}
else
{
MethodTable* pMTMeth = pMeth->GetMethodTable();
if (!pMeth->IsStatic())
{
if (pMTTarg)
{
g_IBCLogger.LogMethodTableAccess(pMTTarg);
// <NICE> We could get the JIT to recognise all delegate creation sequences and
// ensure the thing is always an BoxedEntryPointStub anyway </NICE>
if (pMTMeth->IsValueType() && !pMeth->IsUnboxingStub())
{
// If these are Object/ValueType.ToString().. etc,
// don't need an unboxing Stub.
if ((pMTMeth != g_pValueTypeClass)
&& (pMTMeth != g_pObjectClass))
{
pMeth->CheckRestore();
pMeth = pMTTarg->GetBoxedEntryPointMD(pMeth);
_ASSERTE(pMeth != NULL);
}
}
// Only update the code address if we've decided to go to a different target...
// <NICE> We should make sure the code address that the JIT provided to us is always the right one anyway,
// so we don't have to do all this mucking about. </NICE>
if (pMeth != pMethOrig)
{
method = pMeth->GetMultiCallableAddrOfCode();
}
}
if (gc.target == NULL)
{
COMPlusThrow(kArgumentException, W("Arg_DlgtNullInst"));
}
}
#ifdef HAS_THISPTR_RETBUF_PRECODE
else if (pMeth->HasRetBuffArg() && IsRetBuffPassedAsFirstArg())
method = pMeth->GetLoaderAllocator()->GetFuncPtrStubs()->GetFuncPtrStub(pMeth, PRECODE_THISPTR_RETBUF);
#endif // HAS_THISPTR_RETBUF_PRECODE
gc.refThis->SetTarget(gc.target);
gc.refThis->SetMethodPtr((PCODE)(void *)method);
}
HELPER_METHOD_FRAME_END();
}
|
package cache
import (
"fmt"
"sync"
aiven "github.com/aiven/aiven-go-client"
)
var (
topics = make(map[string]map[string]aiven.KafkaTopic)
topicCacheLock sync.Mutex
)
//TopicCache type
type TopicCache struct {
}
//write writes the specified topic to the cache
func (t TopicCache) write(project, service string, topic *aiven.KafkaListTopic) (err error) {
var cachedService map[string]aiven.KafkaTopic
var ok bool
if cachedService, ok = topics[project+service]; !ok {
cachedService = make(map[string]aiven.KafkaTopic)
}
topicForCache := aiven.KafkaTopic{
MinimumInSyncReplicas: topic.MinimumInSyncReplicas,
Partitions: partitions(topic.Partitions),
Replication: topic.Replication,
RetentionBytes: topic.RetentionBytes,
RetentionHours: topic.RetentionHours,
State: topic.State,
TopicName: topic.TopicName,
CleanupPolicy: topic.CleanupPolicy}
cachedService[topic.TopicName] = topicForCache
topics[project+service] = cachedService
return
}
//Refresh refreshes the Topic cache
func (t TopicCache) Refresh(project, service string, client *aiven.Client) error {
topicCacheLock.Lock()
defer topicCacheLock.Unlock()
return t.populateTopicCache(project, service, client)
}
//Read populates the cache if it doesn't exist, and reads the required topic. An aiven.Error with status
//404 is returned upon cache miss
func (t TopicCache) Read(project, service, topicName string, client *aiven.Client) (topic aiven.KafkaTopic, err error) {
topicCacheLock.Lock()
defer topicCacheLock.Unlock()
if _, ok := topics[project+service]; !ok {
if err = t.populateTopicCache(project, service, client); err != nil {
return
}
}
if cachedService, ok := topics[project+service]; ok {
if topic, ok = cachedService[topicName]; !ok {
// cache miss, return a 404 so it can be cleaned up later
var liveTopic *aiven.KafkaTopic
if liveTopic, err = client.KafkaTopics.Get(project, service, topicName); err == nil {
topic = *liveTopic
}
}
} else {
err = aiven.Error{
Status: 404,
Message: fmt.Sprintf("Cache miss on project/service: %s/%s", project, service),
}
}
return
}
//partitions returns a slice, of empty aiven.Partition, of specified size
func partitions(numPartitions int) (partitions []*aiven.Partition) {
for i := 0; i < numPartitions; i++ {
partitions = append(partitions, &aiven.Partition{})
}
return
}
//populateTopicCache makes a call to Aiven to list kafka topics, and upserts into the cache
func (t TopicCache) populateTopicCache(project, service string, client *aiven.Client) (err error) {
var topics []*aiven.KafkaListTopic
if topics, err = client.KafkaTopics.List(project, service); err == nil {
for _, topic := range topics {
t.write(project, service, topic)
}
}
return
}
|
def multi_svc_chain(self, policys, svcs):
    '''Create left/right virtual networks with a VM in each, build service
    instances for the requested service modes, wire them into a service
    chain using the given policies, then verify ICMP and/or port traffic
    through the chain.'''
right_net_fix, r_hs_obj = self.config_vn(stack_name='right_net')
left_net_fix, l_hs_obj = self.config_vn(stack_name='left_net')
vn_list = [left_net_fix, right_net_fix]
vms = []
vms = self.config_vms(vn_list)
svc_tmpls = {}
for i, mode in enumerate(set(svcs.values())):
tmpl = self.config_svc_template(stack_name='st_%d' % i,
mode=mode)
svc_tmpls[mode] = {}
svc_tmpls[mode]['tmpl'] = tmpl
svc_tmpls[mode]['obj'] = tmpl.st_obj
svc_tmpls[mode]['fq_name'] = ':'.join(tmpl.st_fq_name)
sis = {}
i = 1
for svc, mode in svcs.items():
sis[svc] = self.config_svc_instance(
'sil_%d' % i, svc_tmpls[mode]['tmpl'], vn_list)
i += 1
rules = []
test_ping = False
for policy in policys:
if (policy['proto'] == 'icmp') or (policy['proto'] == 'icmp6'):
test_ping = True
rules.append(self.config_svc_rule(direction=policy['direction'],
proto=policy['proto'],
src_ports=policy.get('src_ports',None),
dst_ports=policy.get('dst_ports',None),
src_vns=[left_net_fix], dst_vns=[right_net_fix],
si_fq_names=[(':').join(sis[policy['svc']][0].si_fq_name)]))
chain = self.config_svc_chain(rules, vn_list, [l_hs_obj, r_hs_obj], 'svc_chain')
time.sleep(10)
if test_ping:
assert vms[0].ping_with_certainty(vms[1].vm_ip, expectation=True)
for policy in policys:
if policy['proto'] == 'icmp':
continue
proto = policy['proto'] if policy['proto'] != 'any' else 'udp'
sport = policy.get('src_ports', 8000)
dport = policy.get('dst_ports', 8000)
if type(sport) == type([]):
sport = sport[0][0]
dport = dport[0][0]
sent, recv = self.verify_traffic(vms[0], vms[1], proto, sport, dport)
assert sent == recv, "%s Traffic with src port %d, dst port %d failed" % (proto, sport, dport)
return True
|
use std::ops::Bound::{self, *};
pub(crate) struct Bounds<T> {
pub start: Bound<T>,
pub end: Bound<T>,
}
impl<'a, T> From<&'a std::ops::Range<T>> for Bounds<&'a T> {
fn from(range: &'a std::ops::Range<T>) -> Self {
Self {
start: Included(&range.start),
end: Excluded(&range.end),
}
}
}
impl<T> TryFrom<Bounds<T>> for std::ops::Range<T> {
type Error = ();
fn try_from(value: Bounds<T>) -> Result<Self, Self::Error> {
let start = match value.start {
Included(start) => start,
_ => return Err(()),
};
let end = match value.end {
Excluded(end) => end,
_ => return Err(()),
};
Ok(Self { start, end })
}
}
impl<'a, T> From<&'a std::ops::RangeFrom<T>> for Bounds<&'a T> {
fn from(range: &'a std::ops::RangeFrom<T>) -> Self {
Self {
start: Included(&range.start),
end: Unbounded,
}
}
}
impl<T> TryFrom<Bounds<T>> for std::ops::RangeFrom<T> {
type Error = ();
fn try_from(value: Bounds<T>) -> Result<Self, Self::Error> {
if !matches!(value.end, Unbounded) {
return Err(());
}
let start = match value.start {
Included(start) => start,
_ => return Err(()),
};
Ok(Self { start })
}
}
impl<'a, T> From<&'a std::ops::RangeTo<T>> for Bounds<&'a T> {
fn from(range: &'a std::ops::RangeTo<T>) -> Self {
Self {
start: Unbounded,
end: Excluded(&range.end),
}
}
}
impl<T> TryFrom<Bounds<T>> for std::ops::RangeTo<T> {
type Error = ();
fn try_from(value: Bounds<T>) -> Result<Self, Self::Error> {
if !matches!(value.start, Unbounded) {
return Err(());
}
let end = match value.end {
Excluded(end) => end,
_ => return Err(()),
};
Ok(Self { end })
}
}
impl<'a, T> From<&'a std::ops::RangeToInclusive<T>> for Bounds<&'a T> {
fn from(range: &'a std::ops::RangeToInclusive<T>) -> Self {
Self {
start: Unbounded,
end: Included(&range.end),
}
}
}
impl<'a, T> From<&'a std::ops::RangeInclusive<T>> for Bounds<&'a T> {
fn from(range: &'a std::ops::RangeInclusive<T>) -> Self {
Self {
start: Included(range.start()),
end: Included(range.end()),
}
}
}
impl<'a, T> From<&'a (Bound<T>, Bound<T>)> for Bounds<&'a T> {
fn from(range: &'a (Bound<T>, Bound<T>)) -> Self {
fn as_ref<T>(bound: &Bound<T>) -> Bound<&T> {
match bound {
Included(ref x) => Included(x),
Excluded(ref x) => Excluded(x),
Unbounded => Unbounded,
}
}
Self {
start: as_ref(&range.0),
end: as_ref(&range.1),
}
}
}
impl<T> From<Bounds<T>> for (Bound<T>, Bound<T>) {
fn from(bounds: Bounds<T>) -> Self {
(bounds.start, bounds.end)
}
}
bitflags::bitflags! {
#[repr(transparent)]
pub(crate) struct Flags: u8 {
const EMPTY = 0x01;
const LB_INC = 0x02;
const UB_INC = 0x04;
const LB_INF = 0x08;
const UB_INF = 0x10;
}
}
impl<'a, T: crate::ToSql> crate::ToSql for Bounds<&'a T> {
fn ty(&self) -> crate::pq::Type {
use crate::pq::ToArray;
match self.start {
Included(start) | Excluded(start) => return start.ty().to_range(),
_ => (),
}
match self.end {
Included(end) | Excluded(end) => return end.ty().to_range(),
_ => (),
}
crate::pq::types::UNKNOWN
}
/*
* https://github.com/postgres/postgres/blob/REL_12_0/src/backend/utils/adt/rangetypes.c#L123
*/
fn to_text(&self) -> crate::Result<Option<Vec<u8>>> {
let mut vec = Vec::new();
let start_char = match self.start {
Included(_) => b'[',
_ => b'(',
};
vec.push(start_char);
bound_to_text(&mut vec, &self.start)?;
vec.push(b',');
bound_to_text(&mut vec, &self.end)?;
let end_char = match self.end {
Included(_) => b']',
_ => b')',
};
vec.push(end_char);
vec.push(b'\0');
Ok(Some(vec))
}
/*
* https://github.com/postgres/postgres/blob/REL_12_0/src/backend/utils/adt/rangetypes.c#L246
*/
fn to_binary(&self) -> crate::Result<Option<Vec<u8>>> {
let mut buf = Vec::new();
let mut flags = Flags::empty();
match self.start {
Included(_) => flags.insert(Flags::LB_INC),
Unbounded => flags.insert(Flags::LB_INF),
_ => (),
};
match self.end {
Included(_) => flags.insert(Flags::UB_INC),
Unbounded => flags.insert(Flags::UB_INF),
_ => (),
};
buf.push(flags.bits());
bound_to_binary(&mut buf, &self.start)?;
bound_to_binary(&mut buf, &self.end)?;
Ok(Some(buf))
}
}
macro_rules! bound {
($bound:ident, $op:ident) => {{
let bound = match $bound {
Included(bound) => bound,
Excluded(bound) => bound,
Unbounded => panic!(),
};
match bound.$op()? {
Some(bound) => bound,
None => return Ok(()),
}
}};
}
fn bound_to_text<T: crate::ToSql>(buf: &mut Vec<u8>, bound: &Bound<&T>) -> crate::Result<()> {
if !matches!(bound, Unbounded) {
let mut b = bound!(bound, to_text);
b.pop(); // removes \0
buf.append(&mut b);
}
Ok(())
}
fn bound_to_binary<T: crate::ToSql>(buf: &mut Vec<u8>, bound: &Bound<&T>) -> crate::Result<()> {
if !matches!(bound, Unbounded) {
let mut b = bound!(bound, to_binary);
crate::to_sql::write_i32(buf, b.len() as i32)?;
buf.append(&mut b);
}
Ok(())
}
impl<T: crate::FromSql> crate::FromSql for Bounds<T> {
/*
* https://github.com/postgres/postgres/blob/REL_12_0/src/backend/utils/adt/rangetypes.c#L81
*/
fn from_text(ty: &crate::pq::Type, raw: Option<&str>) -> crate::Result<Self> {
let raw = crate::not_null(raw)?;
if raw == "empty" {
log::error!("Unsuported empty range");
return Err(Self::error(ty, raw));
}
lazy_static::lazy_static! {
static ref REGEX: regex::Regex =
regex::Regex::new(r"[\[\(](?P<start>.*?),(?P<end>.*?)[\]\)]")
.unwrap();
}
let matches = REGEX.captures(raw).unwrap();
// tstzrange are quoted
let start_str = matches
.name("start")
.map(|x| x.as_str().trim_matches('\"'))
.unwrap_or_default();
let start_bound = if start_str.is_empty() {
Bound::Unbounded
} else {
let start = T::from_text(ty, Some(start_str))?;
match raw.chars().next() {
Some('[') => Bound::Included(start),
Some('(') => Bound::Excluded(start),
_ => return Err(Self::error(ty, raw)),
}
};
// tstzrange are quoted
let end_str = matches
.name("end")
.map(|x| x.as_str().trim_matches('\"'))
.unwrap_or_default();
let end_bound = if end_str.is_empty() {
Bound::Unbounded
} else {
let end = T::from_text(ty, Some(end_str))?;
match raw.chars().last() {
Some(']') => Bound::Included(end),
Some(')') => Bound::Excluded(end),
_ => return Err(Self::error(ty, raw)),
}
};
Ok(Self {
start: start_bound,
end: end_bound,
})
}
/*
* https://github.com/postgres/postgres/blob/REL_12_0/src/backend/utils/adt/rangetypes.c#L163
*/
fn from_binary(ty: &crate::pq::Type, raw: Option<&[u8]>) -> crate::Result<Self> {
let mut buf = crate::from_sql::not_null(raw)?;
let flag = Flags::from_bits_truncate(crate::from_sql::read_u8(&mut buf)?);
if flag.contains(Flags::EMPTY) {
log::error!("Unsuported empty range");
return Err(Self::error(ty, raw));
}
let start_bound = if flag.contains(Flags::LB_INF) {
Bound::Unbounded
} else {
let start_bound_len = crate::from_sql::read_i32(&mut buf)?;
let mut start = Vec::new();
for _ in 0..start_bound_len {
let b = crate::from_sql::read_u8(&mut buf)?;
start.push(b);
}
if flag.contains(Flags::LB_INC) {
Bound::Included(T::from_binary(ty, Some(&start))?)
} else {
Bound::Excluded(T::from_binary(ty, Some(&start))?)
}
};
let end_bound = if flag.contains(Flags::UB_INF) {
Bound::Unbounded
} else {
let end_bound_len = crate::from_sql::read_i32(&mut buf)?;
let mut end = Vec::new();
for _ in 0..end_bound_len {
let b = crate::from_sql::read_u8(&mut buf)?;
end.push(b);
}
if flag.contains(Flags::UB_INC) {
Bound::Included(T::from_binary(ty, Some(&end))?)
} else {
Bound::Excluded(T::from_binary(ty, Some(&end))?)
}
};
Ok(Self {
start: start_bound,
end: end_bound,
})
}
}
|
Considerations of reactive power/voltage control in CAISO market operations
The design and integration of traditional voltage control and stability tools with the Energy Market Management System at the California Independent System Operator (CAISO) are discussed and presented in this paper. The tools are transformed from purely analytical and planning applications into active participants in the daily and real-time energy market operations, providing valuable assessments of system security and stability based on generation commitments and dispatches under both normal and contingency conditions. The design features presented in this paper are part of the phased approach to integrate voltage control and reactive power optimization into the inner decision-making process of the CAISO market system.
|
package main;
public class localBrowser {
private Window win;
public localBrowser(Window win){
this.win = win;
}
public void performSearch(){
String url = win.searchArea.getText();
if(!url.isEmpty())
win.browser.setUrl(url);
}
}
|
{-# LANGUAGE DoAndIfThenElse #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE ScopedTypeVariables #-}
-- | Network interpreter for Domains specific API
module Network.DO.Domain.Net(dnsCommandsInterpreter) where
import Control.Applicative
import Control.Comonad.Env.Class (ComonadEnv, ask)
import Control.Monad.Trans (MonadIO)
import Data.Aeson as A hiding (Result)
import Data.IP
import Data.Maybe
import Data.Proxy
import Network.DO.Domain.Commands
import Network.DO.Net.Common
import Network.DO.Types as DO hiding (URI)
import Network.REST
import Prelude as P hiding (error)
domainsURI :: String
domainsURI = "domains"
domainsEndpoint :: String
domainsEndpoint = rootURI </> apiVersion </> domainsURI
instance Listable Domain where
listEndpoint _ = domainsEndpoint
listField _ = "domains"
doCreateDomain :: (ComonadEnv ToolConfiguration w, Monad m) => w a -> DomainName -> IP -> (RESTT m (Result Domain), w a)
doCreateDomain w name ip = maybe (return $ error "no authentication token defined", w)
runQuery
(authToken (ask w))
where
runQuery t = let opts = authorisation t
domain = postJSONWith opts (toURI domainsEndpoint) (toJSON config) >>= return . fromResponse "domain"
in (domain, w)
where
config = DomainConfig name ip
doDeleteDomain :: (ComonadEnv ToolConfiguration w, Monad m) => w a -> DomainName -> (RESTT m (Maybe String), w a)
doDeleteDomain w name = maybe (return $ Just "no authentication token defined", w)
(\ t -> let r = deleteJSONWith (authorisation t) (toURI $ domainsEndpoint </> show name) >> return Nothing
in (r, w))
(authToken (ask w))
doListRecords :: (ComonadEnv ToolConfiguration w, Monad m) => w a -> DomainName -> (RESTT m [DomainRecord], w a)
doListRecords w name = maybe (return [], w)
(\ t -> let records = toList "domain_records" <$> getJSONWith (authorisation t) (toURI $ domainsEndpoint </> show name </> "records")
in (records, w))
(authToken (ask w))
doCreateRecord :: (ComonadEnv ToolConfiguration w, Monad m) => w a -> DomainName -> DomainRecord -> (RESTT m (Result DomainRecord), w a)
doCreateRecord w name record =
maybe (return $ error "no authentication token defined", w)
runQuery
(authToken (ask w))
where
runQuery t = let opts = authorisation t
domain = postJSONWith opts (toURI $ domainsEndpoint </> show name </> "records") (toJSON record) >>= return . fromResponse "domain_record"
in (domain, w)
doDeleteRecord :: (ComonadEnv ToolConfiguration w, Monad m) => w a -> DomainName -> Id -> (RESTT m (Maybe String), w a)
doDeleteRecord w name rid =
maybe (return $ Just "no authentication token defined", w)
(\ t -> let r = deleteJSONWith (authorisation t) (toURI $ domainsEndpoint </> show name </> "records" </> show rid ) >> return Nothing
in (r, w))
(authToken (ask w))
dnsCommandsInterpreter :: (MonadIO m, ComonadEnv ToolConfiguration w) => w a -> CoDomainCommands (RESTT m) (w a)
dnsCommandsInterpreter = CoDomainCommands
<$> queryList (Proxy :: Proxy Domain)
<*> doCreateDomain
<*> doDeleteDomain
<*> doListRecords
<*> doCreateRecord
<*> doDeleteRecord
|
/**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
#ifndef ADC_ADOC_TK_ATTAG_HXX
#define ADC_ADOC_TK_ATTAG_HXX
// USED SERVICES
// BASE CLASSES
#include <adoc/adoc_tok.hxx>
// COMPONENTS
// PARAMETERS
#include <ary/info/inftypes.hxx>
namespace adoc {
typedef ary::info::E_AtTagId E_AtTagId;
class Tok_at_std : public Token
{
public:
Tok_at_std(
E_AtTagId i_nId )
: eId(i_nId) {}
virtual void Trigger(
TokenInterpreter & io_rInterpreter ) const;
virtual const char *
Text() const;
E_AtTagId Id() const { return eId; }
private:
E_AtTagId eId;
};
#define DECL_TOKEN_CLASS(name) \
class Tok_##name : public Token \
{ public: \
virtual void Trigger( \
TokenInterpreter & io_rInterpreter ) const; \
virtual const char * \
Text() const; \
}
DECL_TOKEN_CLASS(at_base);
DECL_TOKEN_CLASS(at_exception);
DECL_TOKEN_CLASS(at_impl);
DECL_TOKEN_CLASS(at_key);
DECL_TOKEN_CLASS(at_param);
DECL_TOKEN_CLASS(at_see);
DECL_TOKEN_CLASS(at_template);
DECL_TOKEN_CLASS(at_interface);
DECL_TOKEN_CLASS(at_internal);
DECL_TOKEN_CLASS(at_obsolete);
DECL_TOKEN_CLASS(at_module);
DECL_TOKEN_CLASS(at_file);
DECL_TOKEN_CLASS(at_gloss);
DECL_TOKEN_CLASS(at_global);
DECL_TOKEN_CLASS(at_include);
DECL_TOKEN_CLASS(at_label);
DECL_TOKEN_CLASS(at_HTML);
DECL_TOKEN_CLASS(at_NOHTML);
DECL_TOKEN_CLASS(at_since);
#undef DECL_TOKEN_CLASS
} // namespace adoc
#endif
|
def store_position(self, timer):
self._append_time(timer)
self._append_storex(self.xposition[-1])
self._append_storey(self.yposition[-1])
|
In experiments with an artificially perfused guinea-pig brain, labelled ethmosine possessing gamma-activity was used to study the penetration of the drug through the blood-brain barrier. The ethmosine transport constant was found to be in the range of 0.45-0.62 x 10(-3) ml x min-1 x g-1 for various segments of the anterior brain, suggesting that the agent penetrates the central nervous system very slowly. The slight penetration of ethmosine through the blood-brain barrier may be one of the reasons why this phenothiazine derivative has no marked neurotropic activity.
|
def do_python(self, line):
    """Execute a line of Python in the debugger's expression namespace, or
    drop into an interactive interpreter when no line is given."""
locals = self.getExpressionLocals()
if len(line) != 0:
cobj = compile(line, 'cli_input', 'exec')
exec(cobj, locals)
else:
code.interact(local=locals)
|
A hybrid particle swarm with velocity mutation for constraint optimization problems
Two approaches for solving numerical continuous domain constrained optimization problems are proposed and experimented with. The first approach is based on particle swarm optimization algorithm with a new mutation operator in its velocity updating rule. Also, a gradient mutation is proposed and incorporated into the algorithm. This algorithm uses ε-level constraint handling method. The second approach is based on covariance matrix adaptation evolutionary strategy with the same method for handling constraints. It is experimentally shown that the first approach needs less number of function evaluations than the second one to find a feasible solution while the second approach is more effective in optimizing the objective value. Thus, a hybrid approach is proposed (third approach) which uses the first approach for locating potentially different feasible solutions and the second approach for further improving the solutions found so far. Also, a multi-swarm mechanism is used in which several instances of the first approach are run to locate potentially different feasible solutions. The proposed hybrid approach is applied to 18 standard constrained optimization benchmarks with up to 30 dimensions. Comparisons with two other state-of-the-art approaches show that the hybrid approach performs better in terms of finding feasible solutions and minimizing the objective function.
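To make the velocity-mutation idea concrete, here is a minimal Python sketch of a generic PSO velocity update with an extra mutation step applied to the new velocity. It is illustrative only: the function name, the Gaussian mutation form, the mutation rate and the inertia/acceleration constants are assumptions, and the paper's specific mutation operator, gradient mutation and epsilon-level constraint handling are not reproduced here.

# Illustrative sketch of a PSO velocity update with velocity mutation.
# The mutation form, rates and constants below are assumptions, not the
# operators proposed in the paper.
import numpy as np

def velocity_update_with_mutation(v, x, pbest, gbest, w=0.7, c1=1.5, c2=1.5,
                                  p_mut=0.1, sigma=0.5, rng=None):
    """Return a mutated PSO velocity for one particle (all arrays 1-D)."""
    rng = rng or np.random.default_rng()
    r1, r2 = rng.random(x.shape), rng.random(x.shape)
    # Standard inertia + cognitive + social terms.
    v_new = w * v + c1 * r1 * (pbest - x) + c2 * r2 * (gbest - x)
    # Velocity mutation: perturb a random subset of components with Gaussian noise.
    mask = rng.random(x.shape) < p_mut
    v_new[mask] += rng.normal(0.0, sigma, size=int(mask.sum()))
    return v_new

# Example position update for one particle:
#   v = velocity_update_with_mutation(v, x, pbest, gbest); x = x + v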
|
import * as DJS from "discord.js";
import { Bot } from "structures/Bot";
import { request } from "undici";
import { SubCommand } from "structures/Command/SubCommand";
interface Data {
attachments: {
fallback: string;
footer: string;
text: string;
}[];
username: string;
}
export default class DadJokeCommand extends SubCommand {
constructor(bot: Bot) {
super(bot, {
commandName: "fun",
name: "dad-joke",
description: "Returns a dad joke",
});
}
async execute(interaction: DJS.CommandInteraction) {
await interaction.deferReply();
const data = (await request(this.APIs.DadJoke).then((res) => res.body.json())) as Data;
await interaction.editReply({ content: data.attachments[0].fallback });
}
}
|
‘Homosexuality is not a problem – it doesn’t do you any harm and can be lots of fun’: Students and Gay Rights Activism in Irish Universities, 1970s–1980s
Using primary archival material, this article explores the role of students and universities in the campaign for gay rights in Ireland in the 1970s and 1980s. At a time when few organisations in Ireland involved themselves in the campaign for gay rights, student bodies facilitated the promotion of gay rights, interaction between gay rights organisations and students and challenged the legal and societal attitudes towards homosexuality in Ireland. In doing so, universities, both north and south of the border, became important spaces of gay rights activism, both in terms of the activities taking place there, but also symbolically, as gay and lesbian students challenged their right to claim a space within their respective universities, something denied to them in the past. Moreover, through the use of the student press, conferences and campaigns to gain official recognition for gay societies, students helped to promote a broader discussion on gay rights in Ireland. This case study analysis of gay rights activism on Irish universities offers an insight into the importance of exploring the efforts of students beyond the long 1960s, arguing that students continued to be important agents in challenging the status quo in Ireland and transforming Irish social norms.
|
//
// Tree.cpp
// Gina
//
// Created by <NAME> on 06.01.14.
// Copyright (c) 2014 <NAME>. All rights reserved.
//
#include <iomanip>
#include <math.h>
#include "Tree.h"
using namespace std;
using namespace Gina::Structures;
void Node::setLeft ( Node *left )
{
if ( left != NULL ) left->parent = this;
this->left = left;
}
void Node::setRight ( Node *right )
{
if ( right != NULL ) right->parent = this;
this->right = right;
}
void Node::setChild ( Node *n )
{
if ( n == NULL ) return;
if ( this->key > n->key ) {
setLeft (n);
} else {
setRight (n);
}
}
bool Node::hasLeft () const
{
return left != NULL;
}
bool Node::hasRight () const
{
return right != NULL;
}
Node* Node::nextForKey ( int k ) const
{
return k > this->key ? this->right : this->left;
}
Node* Node::clearRelations ()
{
this->left = NULL;
this->right = NULL;
this->parent = NULL;
return this;
}
Node* Node::copyData ( Node *node )
{
this->key = node->key;
this->value = node->value;
return this;
}
void Node::print () const
{
printf ( "Node [ %3d ]\t=\t%d\n", this->key, this->value );
}
Node* Tree::insertNode ( int key, int value )
{
Node* n = new Node ( key, value );
insertNode ( n );
return n;
}
void Tree::insertNode ( Node *node )
{
// Find the node where to put the new key
Node* n = root, * parent = NULL;
if ( n == NULL ) {
    root = node;
    n_count++;
    return;
}
while ( n != NULL ) {
parent = n;
n = *n > *node ? n->left : n->right;
}
if ( *parent == *node ) {
parent->copyData ( node );
return;
}
parent->setChild ( node );
n_count++;
}
Node* Tree::deleteKey ( int key )
{
Node* delNode = searchKey ( key );
if ( delNode == NULL ) return NULL;
return deleteNode ( delNode );
}
Node* Tree::deleteNode ( Node *nodeToDelete )
{
Node *nodeToReplace = NULL, *child = NULL;
// Test if Node has only one or none children
if ( !nodeToDelete->hasLeft () || !nodeToDelete->hasRight ()) {
nodeToReplace = nodeToDelete;
} else {
nodeToReplace = maxForNode ( nodeToDelete->left );
}
// Find the child of node to replace
if ( nodeToReplace->hasLeft ()) {
child = nodeToReplace->left;
} else {
child = nodeToReplace->right;
}
if ( child != NULL ) {
child->parent = nodeToReplace->parent;
}
if ( nodeToReplace->parent == NULL ) {
    // Splicing the root out of the tree: its child becomes the new root.
    root = child;
} else if ( nodeToReplace == nodeToReplace->parent->left ) {
nodeToReplace->parent->left = child;
} else {
nodeToReplace->parent->right = child;
}
// Copy data instead of delete
if ( nodeToReplace != nodeToDelete ) {
nodeToDelete->copyData ( nodeToReplace );
}
n_count--;
return nodeToReplace;
}
Node* Tree::searchKey ( int key ) const
{
Node* n = root;
while ( n != NULL && n->key != key ) {
n = n->key > key ? n->left : n->right;
}
return n;
}
Node* Tree::minEl () const
{
return minForNode ( root );
}
Node* Tree::maxEl () const
{
return maxForNode ( root );
}
// Private Tree methods
Node* Tree::minForNode ( Node *n ) const
{
if ( n->left == NULL ) return n;
return minForNode ( n->left );
}
Node* Tree::maxForNode ( Node *n ) const
{
if ( n->right == NULL ) return n;
return maxForNode ( n->right );
}
void Tree::printLinear ( Node* focusNode ) const
{
if ( focusNode != NULL ) {
printLinear ( focusNode->left );
focusNode->print ();
printLinear ( focusNode->right );
}
}
void Tree::printLinear () const
{
if ( root != NULL ) {
printLinear ( root->left );
root->print ();
printLinear ( root->right );
}
}
void Tree::print () const
{
printPretty ( root, 1, 1, cout );
}
void Tree::print ( Node* focusNode, int indent ) const
{
printPretty ( focusNode, 1, indent, cout );
}
///////////////////////////////////////////////////////////////////////////////////////
// Pretty print
///////////////////////////////////////////////////////////////////////////////////////
int Tree::maxHeight(Node *p) const
{
if (!p) return 0;
int leftHeight = maxHeight(p->left);
int rightHeight = maxHeight(p->right);
return (leftHeight > rightHeight) ? leftHeight + 1: rightHeight + 1;
}
// Print the arm branches (eg, / \ ) on a line
void Tree::printBranches(int branchLen, int nodeSpaceLen, int startLen, int nodesInThisLevel, const deque<Node*>& nodesQueue, ostream& out) const
{
deque<Node*>::const_iterator iter = nodesQueue.begin();
for (int i = 0; i < nodesInThisLevel / 2; i++) {
out << ((i == 0) ? setw(startLen-1) : setw(nodeSpaceLen-2)) << "" << ((*iter++) ? "/" : " ");
out << setw(2*branchLen+2) << "" << ((*iter++) ? "\\" : " ");
}
out << endl;
}
// Print the branches and node (eg, ___10___ )
void Tree::printNodes(int branchLen, int nodeSpaceLen, int startLen, int nodesInThisLevel, const deque<Node*>& nodesQueue, ostream& out) const
{
deque<Node*>::const_iterator iter = nodesQueue.begin();
for (int i = 0; i < nodesInThisLevel; i++, iter++) {
out << ((i == 0) ? setw(startLen) : setw(nodeSpaceLen)) << "" << ((*iter && (*iter)->left) ? setfill('_') : setfill(' '));
if ((*iter) != NULL) {
out << setw(branchLen+2) << (*iter)->value;
} else {
out << setw(branchLen+2) << "";
}
out << ((*iter && (*iter)->right) ? setfill('_') : setfill(' ')) << setw(branchLen) << "" << setfill(' ');
}
out << endl;
}
// Print the leaves only (just for the bottom row)
void Tree::printLeaves(int indentSpace, int level, int nodesInThisLevel, const deque<Node*>& nodesQueue, ostream& out) const
{
deque<Node*>::const_iterator iter = nodesQueue.begin();
for (int i = 0; i < nodesInThisLevel; i++, iter++) {
if ((*iter) != NULL) {
out << ((i == 0) ? setw(indentSpace+2) : setw(2*level+2)) << (*iter)->value;
} else {
out << ((i == 0) ? setw(indentSpace+2) : setw(2*level+2)) << "";
}
}
out << endl;
}
// Pretty formatting of a binary tree to the output stream
// @param
// level        Controls how widely the tree is spread (eg, level 1 has the minimum space between nodes, while level 2 has a larger space between nodes)
// indentSpace  Adds some indent space to the left (eg, indentSpace of 0 means the lowest level of the left node will stick to the left margin)
void Tree::printPretty ( Node *root, int level, int indentSpace, ostream& out ) const
{
int h = maxHeight(root);
int nodesInThisLevel = 1;
int branchLen = 2*((int)pow(2.0,h)-1) - (3-level)*(int)pow(2.0,h-1); // eq of the length of branch for each node of each level
int nodeSpaceLen = 2 + (level+1)*(int)pow(2.0,h); // distance between left neighbor node's right arm and right neighbor node's left arm
int startLen = branchLen + (3-level) + indentSpace; // starting space to the first node to print of each level (for the left most node of each level only)
deque<Node*> nodesQueue;
nodesQueue.push_back(root);
for (int r = 1; r < h; r++) {
printBranches(branchLen, nodeSpaceLen, startLen, nodesInThisLevel, nodesQueue, out);
branchLen = branchLen/2 - 1;
nodeSpaceLen = nodeSpaceLen/2 + 1;
startLen = branchLen + (3-level) + indentSpace;
printNodes(branchLen, nodeSpaceLen, startLen, nodesInThisLevel, nodesQueue, out);
for (int i = 0; i < nodesInThisLevel; i++) {
Node *currNode = nodesQueue.front();
nodesQueue.pop_front();
if (currNode) {
nodesQueue.push_back(currNode->left);
nodesQueue.push_back(currNode->right);
} else {
nodesQueue.push_back(NULL);
nodesQueue.push_back(NULL);
}
}
nodesInThisLevel *= 2;
}
printBranches(branchLen, nodeSpaceLen, startLen, nodesInThisLevel, nodesQueue, out);
printLeaves(indentSpace, level, nodesInThisLevel, nodesQueue, out);
}
|
Evaluating Differences in Mathematical Competencies in Middle School Students during Pandemic Conditions through Preparatec Platform
This research evaluates the difference in essential mathematical competencies of middle school students transitioning to high school under pandemic (COVID-19) conditions, using the PreparaTec platform and released items from the Programme for International Student Assessment (PISA) as the evaluation instrument. The study was conducted with 84 first-semester high school students at Prepa Tec (Tecnologico de Monterrey, Campus Cuernavaca), aged 14 and 15. The students were divided into three experimental groups and a control group. Three levels of mathematical competencies were evaluated: reproduction, definitions, and calculations (level 1); connections and integration to solve problems (level 2); and reflection (level 3). We applied a pre-test at the beginning of the semester and a post-test at the end, after the students had used the PreparaTec platform. Both tests were written and applied in a face-to-face format. The study presents the findings on the impact of using the PreparaTec platform during the first semester on the three levels of mathematical competencies mentioned. The results are presented in two analyses. The first analysis reports the results per experimental group, according to the number of correct answers per group. The second analysis compares each student's percentage of correct answers, by question difficulty level, with the reference percentages reported for the Organisation for Economic Co-operation and Development (OECD) and Spain in the format presented by PISA. The pre-test and post-test consisted of 23 questions from PISA released items, designed to measure the three levels of mathematical competence. The results showed an improvement of 57% in level 1 proficiency questions, 63.6% in level 2 proficiency, and 100% in level 3 proficiency. The findings indicate that new teaching strategies based on Information and Communication Technologies (ICT) and more meaningful forms of assessment further develop students’ mathematical competencies.
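As a rough illustration of the arithmetic behind the reported per-level gains, the sketch below computes the relative improvement in correct answers between pre-test and post-test for each competency level. The counts used here are made up for illustration only; the abstract does not give the raw per-level counts.
# Hypothetical per-level counts of correct answers; not the study's raw data.
pre_correct = {"level 1": 70, "level 2": 44, "level 3": 12}
post_correct = {"level 1": 110, "level 2": 72, "level 3": 24}

for level, pre in pre_correct.items():
    post = post_correct[level]
    improvement = (post - pre) / pre * 100  # relative improvement in percent
    print(f"{level}: {improvement:.1f}% more correct answers in the post-test")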
|
/**
 * This class provides functionality for automated calibration of the PID weights for the NXT.
 * It provides a test that forces the NXT to balance and perform the same movements each run, while it measures control information and sends it to the PC after the test.
 * The class provides two strategies for choosing the next PID values to be tested: a binary search for the optimum and an evolutionary algorithm.
*
* @author Gregor, Robin
*/
public class EvoAlgorithm extends Thread
{
private final EvoDatabase db = new CSVDatabase(new File("evodb_heading6.csv"));
private final UI ui;
private final CommunicatorPC comm;
private final NXTData data;
static boolean running = false;
	// These values should allow the NXT to balance on its own; they are set on the NXT before each test to provide a clean start.
private static final PIDWeights STANDARD_PID_WEIGHTS = new PIDWeights(-2.92236328, -16.25, 0.21320325, 0.2671875);
/**
* class constructor.
*
* @param ui reference to the user interface
* @param comm reference to the PC communicator
* @param data NXTData which model the state of the NXT on the PC side
*/
public EvoAlgorithm(UI ui, CommunicatorPC comm, NXTData data) {
setDaemon(true);
this.ui = ui;
this.comm = comm;
this.data = data;
}
@Override
public void run() {
if (running) {
return;
}
running = true;
try {
// linearSearch(STANDARD_PID_WEIGHTS, 3, FitnessMetrics.LINEAR2, 0.005, 3);
evolutionSearch(FitnessMetrics.LINEAR2, 2, 15, 0.01);
} catch (InterruptedException | IOException ex) {
ex.printStackTrace();
}
running = false;
}
/**
	 * Provides a binary search over a single PID value. The PID value selected by {@link weightIdx} is optimized.
	 * The algorithm first widens the upper and lower limits until further widening no longer improves the result. Starting from these boundaries it tests
	 * the selected PID weights; in each iteration it keeps the boundary with the best fitness value, defined by {@link metric}, and halves the
	 * width of the boundaries. The algorithm terminates once the width of the boundaries falls below {@link delta}.
	 * The algorithm checks the current database for already measured values to allow faster termination. A minimal number of measurements,
	 * defined by {@link minGroupSize}, has to be available within the limits of the current iteration. If that number is not provided by the
	 * database, the missing tests are performed. Each test starts with the {@link initial} PID weights to allow a clean start.
	 *
	 * @param initial PID values used for a clean start
	 * @param weightIdx index of the PID value to be optimized
	 * @param metric fitness metric used to select the best values
	 * @param delta minimal width of the boundaries before the algorithm terminates
	 * @param minGroupSize required number of measurements per value range before the next iteration step is evaluated
* @throws InterruptedException
* @throws IOException
*/
@SuppressWarnings("unused")
private void linearSearch(PIDWeights initial, int weightIdx, FitnessMetric metric, double delta, int minGroupSize) throws InterruptedException, IOException {
double epsilon = Math.abs(STANDARD_PID_WEIGHTS.get(weightIdx));
// perform initial tests
PIDWeights bestPIDWeights = initial;
double bestFitness = getFitness(initial, weightIdx, epsilon, metric, minGroupSize);
// Find lower or higher optimum
for (PIDWeights lower = bestPIDWeights, upper = bestPIDWeights; lower == bestPIDWeights || upper == bestPIDWeights; epsilon *= 2) {
// Prepare candidates
lower = bestPIDWeights.clone();
lower.set(weightIdx, STANDARD_PID_WEIGHTS.get(weightIdx) - epsilon);
upper = bestPIDWeights.clone();
upper.set(weightIdx, STANDARD_PID_WEIGHTS.get(weightIdx) + epsilon);
// Evaluate candidates
final double lowerFitness = getFitness(lower, weightIdx, epsilon, metric, minGroupSize);
if (lowerFitness > bestFitness) {
bestFitness = lowerFitness;
bestPIDWeights = lower;
}
final double upperFitness = getFitness(upper, weightIdx, epsilon, metric, minGroupSize);
if (upperFitness > bestFitness) {
bestFitness = upperFitness;
bestPIDWeights = upper;
}
ui.showMessage("Best: " + bestPIDWeights.get(weightIdx) + " (" + bestFitness + ")");
System.out.println("Best: " + bestPIDWeights.get(weightIdx) + " (" + bestFitness + ")");
}
epsilon /= 2; // Compensate
// Iterate to find best value
while (epsilon > delta) {
epsilon /= 2; // Half epsilon
// Prepare candidates
final PIDWeights lower = bestPIDWeights.clone();
lower.set(weightIdx, bestPIDWeights.get(weightIdx) - epsilon);
final PIDWeights upper = bestPIDWeights.clone();
upper.set(weightIdx, bestPIDWeights.get(weightIdx) + epsilon);
// Evaluate candidates
final double lowerFitness = getFitness(lower, weightIdx, epsilon, metric, minGroupSize);
if (lowerFitness > bestFitness) {
bestFitness = lowerFitness;
bestPIDWeights = lower;
}
final double upperFitness = getFitness(upper, weightIdx, epsilon, metric, minGroupSize);
if (upperFitness > bestFitness) {
bestFitness = upperFitness;
bestPIDWeights = upper;
}
ui.showMessage("Best: " + bestPIDWeights.get(weightIdx) + " (" + bestFitness + ")");
System.out.println("Best: " + bestPIDWeights.get(weightIdx) + " (" + bestFitness + ")");
}
ui.showMessage("Finished linear optimization for " + (weightIdx + 1) + ". PID value.");
}
/**
	 * Computes the fitness value of the measurements for {@link weights} within a range of {@link epsilon}, using the given {@link metric}.
	 * If the database does not provide enough measurements, additional measurements are performed until the minimal
	 * number defined by {@link minGroupSize} is reached.
*
* @param weights current PID weights that are measured
* @param weightIdx current index of the PID weights that is optimised
* @param epsilon current range of upper and lower bound.
* @param metric current metric to determine the fitness value
* @param minGroupSize minimal group size needed to compute the fitness value
* @return fitness value based on values in the database and the given range of selected PID values.
* @throws IOException
* @throws InterruptedException
*/
private double getFitness(PIDWeights weights, int weightIdx, double epsilon, FitnessMetric metric, int minGroupSize) throws IOException, InterruptedException {
final PIDWeights lowerBound = weights.clone();
lowerBound.set(weightIdx, weights.get(weightIdx) - epsilon / 2);
final PIDWeights upperBound = weights.clone();
upperBound.set(weightIdx, weights.get(weightIdx) + epsilon / 2);
final List<Measurements> measurements = db.getMeasurements(lowerBound, upperBound);
		System.out.println(measurements.size() + " measurements found, performing " + Math.max(0, minGroupSize - measurements.size()) + " additional tests.");
for (int i = measurements.size(); i < minGroupSize; i++) {
measurements.add(performTest(weights));
}
final double fitness = FitnessMetric.getFitness(measurements, metric);
System.out.println("Fitness: " + fitness);
return fitness;
}
/**
* Provides an evolutionary selection of PID values that should be tested.
	 * The values are selected based on the provided {@link metric}. The number of iterations is determined by {@link iterations}.
	 * In each iteration a pool of {@link sizeOfPool} values is created according to the following rules.
	 * {@link epsilon} determines the range of the mutations in percent; this range is halved each iteration.
	 * In the first iteration the best 20% of values in the current database are selected, and the remaining 80% are filled with random mutations.
	 * In the following iterations 10% of the current population is discarded and the best 20% of the remaining individuals from the last iteration are kept.
	 * The next 40% of the population is created through linear crossing of values; the remaining 40% is created by randomly mutating values of the current pool.
*
* @param metric
* @param iterations
* @param sizeOfPool
* @param epsilon
* @throws InterruptedException
* @throws IOException
*/
private void evolutionSearch(FitnessMetric metric, int iterations, int sizeOfPool, double epsilon) throws InterruptedException, IOException {
final List<Pair<PIDWeights, Measurements>> PIDpool = db.getBestPIDWeights(metric, sizeOfPool / 5);
for (int i = 0; PIDpool.size() < sizeOfPool; i++) {
PIDpool.add(newRandomizedPIDvalue(PIDpool.get(i % PIDpool.size()), epsilon));
}
for (int iterationNum = 0; iterationNum < iterations + 1; iterationNum++) {
for (int i = 0; i < PIDpool.size(); i++) {
final Pair<PIDWeights, Measurements> currentValue = PIDpool.get(i);
final Measurements measurement = currentValue.getValue();
measurement.addMeasurement(performTest(currentValue.getKey()));
}
{
// ### CREATE NEW GENERATION ###
// retain 1/5 of best individuums
Collections.sort(PIDpool, (a, b) -> -Double.compare(metric.getFitness(a.getValue()), metric.getFitness(b.getValue())));
final double threshold = metric.getFitness(PIDpool.get(sizeOfPool / 5).getValue());
PIDpool.removeIf(a -> metric.getFitness(a.getValue()) < threshold);
// cross 2/5 of population
for (int i = 0; i < 2 * sizeOfPool / 5; i++) {
final Pair<PIDWeights, Measurements> crossedPID = crossPIDvalues(PIDpool.get((int)(PIDpool.size() * Math.random())), PIDpool.get((int)(PIDpool.size() * Math.random())));
PIDpool.add(crossedPID);
}
// kill 1/10 of population
for (int i = 0; i < sizeOfPool / 10; i++) {
				// Cast to int so List.remove(int index) is called instead of remove(Object)
				PIDpool.remove((int) (Math.random() * PIDpool.size()));
}
// mutate 2/5 of population
epsilon /= 2;
for (int i = 0; PIDpool.size() < sizeOfPool; i++) {
PIDpool.add(randomizePIDvalue(PIDpool.get(i % PIDpool.size()), epsilon));
}
}
ui.showMessage("" + iterationNum + ". gen pool done.");
final PIDWeights bestWeights = PIDpool.get(0).getKey();
ui.showMessage("Best values: (" +
bestWeights.weightGyroSpeed + ", " +
bestWeights.weightGyroIntegral + ", " +
bestWeights.weightMotorDistance + ", " +
				bestWeights.weightMotorSpeed + ")");
}
ui.showMessage("Evolution search finished.");
}
/**
* Randomizes the given {@link value} by the percent range defined by {@link epsilon}
*
* @param value reference to PID values.
* @param epsilon mutation range in percent.
	 * @return randomized PID values
*/
private static Pair<PIDWeights, Measurements> randomizePIDvalue(Pair<PIDWeights, Measurements> value, double epsilon) {
final PIDWeights weights = value.getKey();
weights.weightGyroIntegral += (2 * Math.random() - 1.0) * epsilon * weights.weightGyroIntegral;
weights.weightGyroSpeed += (2 * Math.random() - 1.0) * epsilon * weights.weightGyroSpeed;
weights.weightMotorSpeed += (2 * Math.random() - 1.0) * epsilon * weights.weightMotorSpeed;
weights.weightMotorDistance += (2 * Math.random() - 1.0) * epsilon * weights.weightMotorDistance;
return value;
}
/**
* Creates new PIDValues based on the given {@link value}, randomized by the percent range defined by {@link epsilon}.
*
* @param value reference to PID values.
* @param epsilon mutation range in percent.
	 * @return randomized PID values
*/
private static Pair<PIDWeights, Measurements> newRandomizedPIDvalue(Pair<PIDWeights, Measurements> value, double epsilon) {
final Measurements measurement = new Measurements(0, 0, 0, 0, 0);
final PIDWeights weights = value.getKey().clone();
weights.weightGyroIntegral += (2 * Math.random() - 1.0) * epsilon * weights.weightGyroIntegral;
weights.weightGyroSpeed += (2 * Math.random() - 1.0) * epsilon * weights.weightGyroSpeed;
weights.weightMotorSpeed += (2 * Math.random() - 1.0) * epsilon * weights.weightMotorSpeed;
weights.weightMotorDistance += (2 * Math.random() - 1.0) * epsilon * weights.weightMotorDistance;
return new Pair<>(weights, measurement);
}
/**
* Performs a linear addition of both provided values to create a new PID value.
*
* @param value1 PIDValue of first weights
* @param value2 PIDValue of second weights
	 * @return new PID weights averaged from both inputs, paired with an empty measurement
*/
private static Pair<PIDWeights, Measurements> crossPIDvalues(Pair<PIDWeights, Measurements> value1, Pair<PIDWeights, Measurements> value2) {
final Measurements measurement = new Measurements(0, 0, 0, 0, 0);
final PIDWeights weight = new PIDWeights(0, 0, 0, 0);
weight.weightGyroIntegral = (value1.getKey().weightGyroIntegral + value2.getKey().weightGyroIntegral) / 2.0;
weight.weightGyroSpeed = (value1.getKey().weightGyroSpeed + value2.getKey().weightGyroSpeed) / 2.0;
weight.weightMotorDistance = (value1.getKey().weightMotorDistance + value2.getKey().weightMotorDistance) / 2.0;
weight.weightMotorSpeed = (value1.getKey().weightMotorSpeed + value2.getKey().weightMotorSpeed) / 2.0;
return new Pair<>(weight, measurement);
}
/**
* Performs a test for the given {@link pidValues}. The test is structured into 10 different phases which are performed each time in the same order.
	 * The method first initializes a clean start with the standard PID values, checks that the NXT is balancing, and then continues with
	 * the given {@link pidValues}. The sequence of moving, turning and balancing on the spot is hard coded. After each movement command to the NXT
	 * we check whether the NXT is still balancing and abort the test in case it has fallen, to speed up processing.
	 * After a finished test the method requests the measurements from the NXT and returns them.
*
* @param pidValues values that should be tested
* @return returns the measurements received from the NXT
* @throws InterruptedException
* @throws IOException
*/
private Measurements performTest(PIDWeights pidValues) throws InterruptedException, IOException {
ui.showMessage("Testing " + pidValues.weightGyroSpeed + ", " + pidValues.weightGyroIntegral + ", " + pidValues.weightMotorDistance + ", " + pidValues.weightMotorSpeed);
ui.setEvoWeights(pidValues);
data.setMeasurements(null); // Clear measurement
// set standard pid weights
sendPIDWeights(STANDARD_PID_WEIGHTS);
ui.setEvoAlgProcessing("00/10");
		do { // Wait until the NXT is balancing and has not fallen again after a 5s pause
if (!data.getBalancing()) {
ui.showMessage("Start balancing to continue.");
do {
Thread.sleep(100);
} while (!data.getBalancing());
}
Thread.sleep(5000);
} while (!data.getBalancing()); // Check NXT hasn't fallen again
Thread.sleep(5000); // Balance for 5s without measuring to let the NXT stabilize itself
int stateNo = 1;
while (data.getBalancing() && stateNo < 10) {
switch (stateNo) {
case 1: // Start measurement
sendPIDWeights(pidValues);
comm.sendSet(EVO_COLLECT_TEST_DATA, true);
ui.setEvoAlgProcessing("01/10");
Thread.sleep(1000);
break;
case 2:
comm.sendMove(20, true);
ui.setEvoAlgProcessing("02/10");
Thread.sleep(5000);
break;
case 3:
comm.sendTurn(180, true);
ui.setEvoAlgProcessing("03/10");
Thread.sleep(2000);
break;
case 4:
comm.sendMove(20, true);
ui.setEvoAlgProcessing("04/10");
Thread.sleep(5000);
break;
case 5:
comm.sendTurn(-180, true);
ui.setEvoAlgProcessing("05/10");
Thread.sleep(2000);
break;
case 6:
comm.sendMove(10, true);
ui.setEvoAlgProcessing("06/10");
Thread.sleep(3000);
break;
case 7:
comm.sendTurn(-180, true);
ui.setEvoAlgProcessing("07/10");
Thread.sleep(2000);
break;
case 8:
comm.sendMove(10, true);
ui.setEvoAlgProcessing("08/10");
Thread.sleep(3000);
break;
case 9:
comm.sendTurn(180, true);
ui.setEvoAlgProcessing("09/10");
Thread.sleep(2000);
break;
}
stateNo++;
}
ui.setEvoAlgProcessing("10/10");
// Wait for measurements
while (data.getMeasurements() == null) {
Thread.sleep(100);
}
final Measurements measurements = data.getMeasurements();
ui.showMessage("Passed test time: " + measurements.time);
ui.showMessage("Battery: " + measurements.averageVoltage);
ui.showMessage("Distance: " + measurements.averageDistanceDifference);
ui.showMessage("Heading: " + measurements.averageHeadingDifference);
db.addData(pidValues, measurements); // Store test result
return measurements;
}
/**
* Sends the {@link weights} given as parameter to the NXT and sets them.
*
* @param weights
*/
private void sendPIDWeights(PIDWeights weights) {
comm.sendSet(PID_WEIGHT_ALL, weights.weightGyroSpeed, weights.weightGyroIntegral, weights.weightMotorDistance, weights.weightMotorSpeed);
}
}
|
/**
 * Convert an int to a hexadecimal string with the given padding (number of digits).
 *
 * @param value   value to convert
 * @param padding number of hexadecimal digits to produce
 * @return hexadecimal string of length {@code padding}
 */
protected static String hexString(int value, int padding) {
    final String hexDigits = "0123456789ABCDEF";
    // Pre-fill the buffer with `padding` characters; every position is
    // overwritten below, so the initial content does not matter.
    StringBuilder tempString = new StringBuilder();
    for (int i = 0; i < padding; i++) {
        tempString.append(' ');
    }
    int offset = padding - 1;
    for (int i = 0; i < padding; i++) {
        // Extract the i-th nibble from the least significant end and write its
        // hex digit into the buffer from right to left.
        tempString.setCharAt(offset - i, hexDigits.charAt((value >> (i * 4)) & 0xF));
    }
    return tempString.toString();
}
|
//Bees attack bear and insect mobs that are in the dimension
@SubscribeEvent
public static void MobUpdateEvent(LivingUpdateEvent event)
{
Entity entity = event.getEntity();
if (!entity.world.isRemote &&
entity.dimension == BzDimensionRegistration.bumblezone() &&
Bumblezone.BzConfig.aggressiveBees.get() &&
entity instanceof MobEntity)
{
MobEntity mobEntity = (MobEntity)entity;
if(SetOfBeeHatedEntities.contains(entity.getType()) && !mobEntity.isPotionActive(BzEffects.WRATH_OF_THE_HIVE))
{
            mobEntity.addPotionEffect(new EffectInstance(BzEffects.WRATH_OF_THE_HIVE, Bumblezone.BzConfig.howLongWrathOfTheHiveLasts.get(), 1, false, true));
}
}
}
|
<filename>models/model_job.go
package models
type (
Job map[string]MessagingJobs
MessagingJobs []MessagingJob
MessagingJobValue struct {
Data string `yaml:"data"`
Timestamp int64 `yaml:"timestamp"`
}
MessagingJob struct {
Channel string `yaml:"channel"`
Loop int `yaml:"loop"`
Plugin string `yaml:"plugin"`
Value []MessagingJobValue `yaml:"value,flow"`
}
)
|
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2021 Mark Kettenis <[email protected]>
*/
#define LOG_CATEGORY UCLASS_IOMMU
#include <common.h>
#include <dm.h>
#include <iommu.h>
#include <malloc.h>
#include <phys2bus.h>
#include <asm/io.h>
#if (CONFIG_IS_ENABLED(OF_CONTROL) && !CONFIG_IS_ENABLED(OF_PLATDATA))
#if CONFIG_IS_ENABLED(PCI)
static int dev_pci_iommu_enable(struct udevice *dev)
{
struct udevice *parent = dev->parent;
struct udevice *dev_iommu;
u32 *iommu_map;
u32 iommu_map_mask, length, phandle, rid, rid_base;
int i, count, len, ret;
while (parent) {
len = dev_read_size(parent, "iommu-map");
if (len > 0)
break;
parent = parent->parent;
}
if (len <= 0)
return 0;
iommu_map = malloc(len);
if (!iommu_map)
return -ENOMEM;
count = len / sizeof(u32);
ret = dev_read_u32_array(parent, "iommu-map", iommu_map, count);
if (ret < 0) {
free(iommu_map);
return 0;
}
iommu_map_mask = dev_read_u32_default(parent, "iommu-map-mask", ~0);
rid = (dm_pci_get_bdf(dev) >> 8) & iommu_map_mask;
/* Loop over entries until mapping is found. */
for (i = 0; i < count; i += 4) {
rid_base = iommu_map[i];
phandle = iommu_map[i + 1];
length = iommu_map[i + 3];
if (rid < rid_base || rid >= rid_base + length)
continue;
ret = uclass_get_device_by_phandle_id(UCLASS_IOMMU, phandle,
&dev_iommu);
if (ret) {
debug("%s: uclass_get_device_by_ofnode failed: %d\n",
__func__, ret);
free(iommu_map);
return ret;
}
dev->iommu = dev_iommu;
break;
}
free(iommu_map);
return 0;
}
#endif
int dev_iommu_enable(struct udevice *dev)
{
struct ofnode_phandle_args args;
struct udevice *dev_iommu;
int i, count, ret = 0;
count = dev_count_phandle_with_args(dev, "iommus",
"#iommu-cells", 0);
for (i = 0; i < count; i++) {
ret = dev_read_phandle_with_args(dev, "iommus",
"#iommu-cells", 0, i, &args);
if (ret) {
debug("%s: dev_read_phandle_with_args failed: %d\n",
__func__, ret);
return ret;
}
ret = uclass_get_device_by_ofnode(UCLASS_IOMMU, args.node,
&dev_iommu);
if (ret) {
debug("%s: uclass_get_device_by_ofnode failed: %d\n",
__func__, ret);
return ret;
}
dev->iommu = dev_iommu;
}
if (CONFIG_IS_ENABLED(PCI) && count < 0 &&
device_is_on_pci_bus(dev))
return dev_pci_iommu_enable(dev);
return 0;
}
#endif
dma_addr_t dev_iommu_dma_map(struct udevice *dev, void *addr, size_t size)
{
const struct iommu_ops *ops;
if (dev->iommu) {
ops = device_get_ops(dev->iommu);
if (ops && ops->map)
return ops->map(dev->iommu, addr, size);
}
return dev_phys_to_bus(dev, virt_to_phys(addr));
}
void dev_iommu_dma_unmap(struct udevice *dev, dma_addr_t addr, size_t size)
{
const struct iommu_ops *ops;
if (dev->iommu) {
ops = device_get_ops(dev->iommu);
if (ops && ops->unmap)
ops->unmap(dev->iommu, addr, size);
}
}
UCLASS_DRIVER(iommu) = {
.id = UCLASS_IOMMU,
.name = "iommu",
};
|
n, m = map(int, input().split())
x = [i for i in range(1, m + 1)]   # seats numbered 1..m
p = m // 2
s = []
# Build the output order s: start from the middle seat(s) and move outwards,
# appending the smaller-numbered seat of each outward pair first.
if m % 2:
    if m > 1:
        s.append(x[p])
        l = p - 1
        r = p + 1
        while l >= 0 and r < m:
            if x[l] < x[r]:
                s.append(x[l])
                s.append(x[r])
            else:
                s.append(x[r])
                s.append(x[l])
            l -= 1
            r += 1
    elif m == 1:
        s = [1]
else:
    s.append(x[p - 1])
    s.append(x[p])
    l = p - 2
    r = p + 1
    while l >= 0 and r < m:
        if x[l] < x[r]:
            s.append(x[l])
            s.append(x[r])
        else:
            s.append(x[r])
            s.append(x[l])
        l -= 1
        r += 1
q = n // m          # complete passes over the m seats
d = n % m           # remaining entries in the last, partial pass
ans = s * q + s[:d]
for i in range(len(ans)):
    print(ans[i])
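A minimal, self-contained check of the ordering logic above, with the middle-out seat order extracted into a function; the values n = 5 and m = 3 are arbitrary sample inputs used only for illustration.
def seat_order(m):
    # Same middle-out ordering as the script above: start at the middle
    # seat(s) and move outwards, left seat before right seat at each step.
    x = list(range(1, m + 1))
    p = m // 2
    s = [x[p]] if m % 2 else [x[p - 1], x[p]]
    l, r = (p - 1, p + 1) if m % 2 else (p - 2, p + 1)
    while l >= 0 and r < m:
        s.extend(sorted((x[l], x[r])))
        l -= 1
        r += 1
    return s

assert seat_order(3) == [2, 1, 3]
assert seat_order(4) == [2, 3, 1, 4]

# For n = 5, m = 3 the script above prints 2, 1, 3, 2, 1 (one value per line).
n, m = 5, 3
order = seat_order(m)
print(*(order * (n // m) + order[: n % m]), sep="\n")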
|
/**
* Class to test the overhead of wrapping an object up.
* @author Arend Rensink
* @version $Revision: 5479 $
*/
public class ObjectWrapSpeedTest {
/** Constructs a version of the test. */
public ObjectWrapSpeedTest(String version, Action storeAction, Action getAction) {
this.version = version;
this.measure = reporter.register(version);
this.storeAction = storeAction;
this.getAction = getAction;
}
/** Starts the test. */
public void start() {
test();
System.out.printf("Results for %s: %s ms%n", this.version, this.measure.getTotalTime());
}
private void test() {
this.measure.start();
for (int i = 0; i < BOUND; i++) {
this.getAction.start(this.storeAction.start(new Object()));
}
this.measure.stop();
}
private final String version;
private final Reporter measure;
private final Action storeAction;
private final Action getAction;
/**
     * @param args command-line arguments; ignored.
*/
public static void main(String[] args) {
new ObjectWrapSpeedTest("Bare", new Action() {
@Override
public Object start(Object object) {
return object;
}
}, new Action() {
@Override
public Object start(Object object) {
return object;
}
}).start();
new ObjectWrapSpeedTest("Wrapped", new Action() {
@Override
public Object start(Object object) {
return new Wrapper(object);
}
}, new Action() {
@Override
public Object start(Object object) {
return ((Wrapper) object).get();
}
}).start();
new ObjectWrapSpeedTest("Set", new Action() {
@Override
public Object start(Object object) {
return Collections.singleton(object);
}
}, new Action() {
@Override
public Object start(Object object) {
return ((Collection<?>) object).iterator().next();
}
}).start();
new ObjectWrapSpeedTest("Array", new Action() {
@Override
public Object start(Object object) {
return new Object[] {object};
}
}, new Action() {
@Override
public Object start(Object object) {
return ((Object[]) object)[0];
}
}).start();
}
static private final int BOUND = 100000000;
static private final Reporter reporter = Reporter.register(ObjectWrapSpeedTest.class);
/** Interface for an action to be repeatedly invoked in the test. */
private static interface Action {
/** Starts the action. */
Object start(Object object);
}
/** Wrapper class. */
private static class Wrapper {
/** Creates a wrapper instance for a given object. */
Wrapper(Object object) {
this.object = object;
}
/** Retrieves the wrapped object. */
final Object get() {
return this.object;
}
private final Object object;
}
}
|
/**
* Creates a {@link Shape} representing a {@link Geometry},
* according to the specified PointTransformation
* and PointShapeFactory (if relevant).
* <p>
* Note that Shapes do not
* preserve information about which elements in heterogeneous collections
* are 1D and which are 2D.
* For example, a GeometryCollection containing a ring and a
* disk will render as two disks if Graphics.fill is used,
* or as two rings if Graphics.draw is used.
* To avoid this issue use separate shapes for the components.
*
* @param geometry the geometry to convert
* @return a Shape representing the geometry
*/
public Shape toShape(Geometry geometry)
{
if (geometry.isEmpty()) return new GeneralPath();
if (geometry instanceof Polygon) return toShape((Polygon) geometry);
if (geometry instanceof LineString) return toShape((LineString) geometry);
if (geometry instanceof MultiLineString) return toShape((MultiLineString) geometry);
if (geometry instanceof Point) return toShape((Point) geometry);
if (geometry instanceof GeometryCollection) return toShape((GeometryCollection) geometry);
throw new IllegalArgumentException(
"Unrecognized Geometry class: " + geometry.getClass());
}
|
While Jim Carrey was calling gun owners "motherf--kers," Jason Biggs spent Holy Week attacking the Pope.
The Five's Greg Gutfeld took them both on Friday marvelously saying, "Our country is great because washed up comics have the right to suck" (video follows with transcript and commentary):
GREG GUTFELD: So the Jim Carrey gun control video wasn't really about the Second Amendment. It was about Jim Carrey, a waning star, embracing sanctioned targets to create the false front of intelligence. Sort of like America's drone program.
So, it's fitting we began this Holy Week with Carrey ridiculing the late Charlton Heston and now end with Jason Biggs tweeting predictable jokes about the Pope. There were three, trust me, they are forgettable, as is Jason.
But Pope jokes are as old as the Pope, and by all means you should make them. That's why our country is great because washed up comics have the right to suck.
Based on that, Jason and Carrey are civil rights pioneers. They're the Jackie Robinsons of sucking.
I love the First Amendment and Twitter especially for it exposes the soft underbelly of the celebrity simple mind. They undo everything their publicists try to mask. The mystery is replaced by the moronic.
But really, Jimmy and Jason, if you want to create the illusion of edge, at least take a risk. Stop choosing targets approved by your sheep-like peers. The only people you're impressing are those paid to pretend you're funny, i.e., your dates and your agents. Which is why Jason thinks it's cool to hit the Pope and lacks the beans to do the same to Muhammad. It's why Jimmy attacks gun owners, but not gang bangers. And it's why Jimmy mocked Mr. Heston but won't respond to me, yet.
I guess Jimmy thought he couldn't lose a debate to a dead man. That's what's really funny, he did. And now, Charlton Heston has a brighter future in films than Jim Carrey.
BOB BECKEL: Don't, do not sell yourself short here. You got to him.
GUTFELD: I know. This guy -- this is -- he issued a press release which is basically a complete meltdown. This is what I love about this.
Let me read one sentence that we have up here and then I'll get to the funny parts. "I'll just say this, in my opinion, F news," I won't say what it is, "is the last resort for kinda-sorta-almost-journalists whose options have been severely limited by their extreme and intolerant views."
Now, let me explain about extreme and intolerant. This is a guy that insulted most of America and danced on the grave of Charlton Heston.
He -- Jim Carrey is threatening in this letter, threatening legal action, something Charlton Heston can't do because Charlton Heston is dead. That was the whole point. To stand up for somebody who couldn't talk back to a fool like Jim Carrey.
He first tweeted about this on Monday. Basically what he said is that all gun owners are -- I can't say the word.
DANA PERINO: MF.
GUTFELD: MF. And then he released this video, and then he retreated. When you try -- he wouldn't respond. He hid like a little baby. I love it.
|
<gh_stars>1-10
import * as RNFS from "react-native-fs";
import BaseContainer from "../../../lib/container/base/BaseContainer";
import * as MstContainers from "../../../model/impl/MstContainer";
import {
MstItemContainer,
MstSvtLimitContainer,
MstSvtTreasureDeviceContainer,
MstTreasureDeviceLvContainer
} from "../../../model/impl/MstContainer";
import MstUtil from "../utility/MstUtil";
import {EmbeddedCodeConverted, TransSvtName} from "../../../model/master/EmbeddedCodeConverted";
import {MstItem, MstSvtLimit, MstSvtTreasureDevice, MstTreasureDeviceLv} from "../../../model/master/Master";
import {defaultMstGoal, MstGoal} from "./MstGoal";
import Const from "../const/Const";
import {Service} from "../../service/MstService";
import {ImagePool} from "../../../resource/ImagePool";
import {MasterPool} from "../../../resource/MasterPool";
import VersionConfig from "../../../model/config/VersionConfig";
const VersionInfo = require("../../../../config/version.json") as VersionConfig;
const EmbeddedCode = require("../../../resource/embedded_code.json") as EmbeddedCodeConverted;
export default class MstLoader {
private static _instance: MstLoader = undefined;
public static get instance(): MstLoader {
if (MstLoader._instance === undefined) {
MstLoader._instance = new MstLoader();
}
return MstLoader._instance;
}
private constructor() {
this._modelCache = new Map<string, BaseContainer<any>>();
this._service = new Service();
this._basePath = "../../../..";
this._resourcePath = "../../../resource";
console.log(RNFS.DocumentDirectoryPath);
}
private _modelCache: Map<string, BaseContainer<any>>;
private _service: Service;
private _basePath: string;
private _resourcePath: string;
//-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-
//-* RESOURCE
//-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-
public getAppVer(): string {
return VersionInfo.version;
}
public loadImage(type: string, id: number): any {
let key = `${type.toUpperCase()}${id}`;
if (!ImagePool.hasOwnProperty(key)) {
return ImagePool.SKILL0; // default unknown image
} else {
return ImagePool[key];
}
}
//-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-
//-* MODEL
//-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-
public loadModel(name: string): BaseContainer<any> {
if (this._modelCache.has(name)) {
return this._modelCache.get(name);
}
let rawData = MasterPool[name];
let containerName = `${name}Container`;
let instance = new MstContainers[containerName]();
instance.parse(rawData);
this._modelCache.set(name, instance);
return instance;
}
//-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-
//-* GOAL
//-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-
public async loadGoal(): Promise<MstGoal> {
let goal = defaultMstGoal;
let path = `${RNFS.DocumentDirectoryPath}/${Const.DB_FILE_PATH}`;
let exists = await RNFS.exists(path);
if (exists) {
goal = await MstUtil.instance.loadJson(path) as MstGoal;
}
return Promise.resolve(goal);
}
public async writeGoal(goal: Object): Promise<any> {
return MstUtil.instance.writeJson(`${RNFS.DocumentDirectoryPath}/${Const.DB_FILE_PATH}`, goal);
}
//-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-
//-* MASTER DATA
//-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-
public loadSvtDefaultLimitInfo(svtId: number): MstSvtLimit {
let container = MstLoader.instance.loadModel("MstSvtLimit") as MstSvtLimitContainer;
        let mstSvtLimit = container.get(svtId, 0); // first entry (limitCount 0)
if (!mstSvtLimit) {
            // Some servant data is corrupted and has no matching limit info; fall back to the first entry of the group
console.log(`Invalid svt default limit info: svtId: ${svtId}, limitCount: 0`);
let limitGroup = container.getGroup(svtId);
mstSvtLimit = limitGroup.values().next().value;
}
return mstSvtLimit;
}
public loadSvtDefaultTreasureDeviceWithLv(svtId: number, level: number): MstTreasureDeviceLv {
// ensure level secure
if (level < 0) {
level = 1;
} else if (level > 5) {
level = 5;
}
let svtTreasureDeviceCon = this.loadModel("MstSvtTreasureDevice") as MstSvtTreasureDeviceContainer;
let svtTreasureDeviceLvCon = this.loadModel("MstTreasureDeviceLv") as MstTreasureDeviceLvContainer;
let devices = svtTreasureDeviceCon.getGroup(svtId);
let device = devices.values().next().value;
let treasureDevice = svtTreasureDeviceLvCon.get(device.treasureDeviceId, level);
if (!treasureDevice) {
            // Some servant data is corrupted and has no treasure device info for this level; fall back to the first entry of the group
console.log(`Invalid svt treasure device lv info: svtId: ${svtId}, level: ${level}`);
let deviceGroup = svtTreasureDeviceLvCon.getGroup(device.treasureDeviceId);
treasureDevice = deviceGroup.values().next().value;
}
return treasureDevice;
}
//-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-
//-* EMBEDDED CODE
//-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-
public loadEmbeddedCode(): EmbeddedCodeConverted {
return EmbeddedCode;
}
public loadEmbeddedGender(id: number): string {
return this.loadEmbeddedCode().gender[id];
}
public loadEmbeddedPolicy(id: number): string {
return this.loadEmbeddedCode().policy[id];
}
public loadEmbeddedPersonality(id: number): string {
return this.loadEmbeddedCode().personality[id];
}
public loadEmbeddedAttribute(id: number): string {
return this.loadEmbeddedCode().attri[id];
}
public loadEmbeddedSvtName(id: number): TransSvtName {
return this.loadEmbeddedCode().transSvtName[id];
}
//-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-
//-* OTHERS
//-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-
public loadVisibleItemList(): Array<MstItem> {
let list = [] as Array<MstItem>;
let container = MstLoader.instance.loadModel("MstItem") as MstItemContainer;
let items = container.getRaw() as Array<MstItem>;
items.forEach((item: MstItem) => {
if (this._service.isItemVisible(item.id)) {
list.push(item);
}
});
return list;
}
}
|
package de.dc.javafx.xcore.example.demo;
import de.dc.javafx.xcore.example.BaseContactListView;
import de.dc.javafx.xcore.example.model.Contact;
import de.dc.javafx.xcore.example.tiles.ContactTileBar;
import javafx.application.Application;
import javafx.collections.FXCollections;
import javafx.collections.ObservableList;
import javafx.scene.Parent;
import javafx.scene.Scene;
import javafx.stage.Stage;
@SuppressWarnings("all")
public class ContactTileApplication extends Application {
@Override
public void start(final Stage primaryStage) {
primaryStage.setScene(new Scene(getRoot(), 600, 400));
primaryStage.show();
}
public Parent getRoot() {
return new ContactTileBar();
}
public static void main(final String[] args) {
launch(args);
}
}
|
<reponame>G-MontaG/Paranoia<gh_stars>0
import moment = require("moment");
import {ipcMain, dialog} from "electron";
import {win} from "../../init";
import {FileSystemService} from "../file-system.service";
import {appConfigService} from "../app-config.service";
const pathModule = require('path');
const fs = require('fs');
import _ = require('lodash');
import fileSize = require('filesize');
class FileManagementService {
private _watch = {
encrypt: {
watcher: null,
path: null
},
decrypt: {
watcher: null,
path: null
}
};
private type: string;
constructor(type: string) {
this.type = type;
this._getFiles();
this._addFiles();
this._removeFile();
this._removeDir();
}
public static init() {
ipcMain.on('fileManagementInit', (event, arg) => {
FileSystemService.stat(appConfigService.fileManagement.encryptRoot)
.then((stats) => {
if (!stats) {
return FileSystemService.mkdir(appConfigService.fileManagement.encryptRoot);
}
})
.then(() => {
return FileSystemService.stat(appConfigService.fileManagement.decryptRoot);
})
.then((stats) => {
if (!stats) {
return FileSystemService.mkdir(appConfigService.fileManagement.decryptRoot);
}
})
.then(() => {
event.sender.send('fileManagementInit-reply', true);
})
.catch((err) => {
err.message = "Error create work folders. " + err.message;
event.sender.send('fileManagementInit-reply', err);
});
});
}
private _getFiles() {
ipcMain.on(`fileManagementGetFiles-${this.type}`, (event, arg: string) => {
let currentPath = pathModule.join(appConfigService.fileManagement[`${this.type}Root`], arg);
FileSystemService.stat(currentPath)
.then((stats) => {
if (!stats) {
event.sender.send(`fileManagementGetFiles-${this.type}-reply`, false);
}
})
.then(() => {
if (this._closeWatcher(currentPath, this.type)) {
this._createWatcher(currentPath, this.type);
}
return FileSystemService.readdir(currentPath);
})
.then((files: Array<string>) => {
return this._prepareFileList(currentPath, files);
})
.then((preparedFiles) => {
event.sender.send(`fileManagementGetFiles-${this.type}-reply`, preparedFiles);
})
.catch((err) => {
err.message = "Error reading files from folder. " + err.message;
event.sender.send(`fileManagementGetFiles-${this.type}-reply`, err);
});
});
}
private _createWatcher(path: string, type: string) {
this._watch[type].path = path;
this._watch[type].watcher = fs.watch(path, (eventType, filename) => {
if(eventType === 'change' || eventType === 'rename') {
win.webContents.send(`fileManagementChangeFiles-${type}`);
}
});
}
private _closeWatcher(path: string, type: string) {
if (this._watch[type].path !== path && !!this._watch[type].watcher) {
this._watch[type].watcher.close();
this._watch[type].path = null;
return true;
} else if(!this._watch[type].watcher) {
return true;
}
return false;
}
private _prepareFileList(path: string, files: Array<string>) {
return new Promise((resolve, reject) => {
let preparedFiles = [];
_.forEach(files, (file) => {
let fileInfo = fs.statSync(pathModule.join(path, file));
let fileType;
if(fileInfo.isFile()) {
fileType = 'file';
} else if(fileInfo.isDirectory()) {
fileType = 'dir';
} else {
fileType = null;
}
preparedFiles.push({
name: file,
fullPath: pathModule.join(path, file),
type: fileType,
extension: pathModule.extname(file),
size: fileSize(fileInfo.size),
accessTime: moment(fileInfo.atime),
modifyTime: moment(fileInfo.mtime),
createTime: moment(fileInfo.ctime)
});
});
resolve(preparedFiles);
});
}
private _addFiles() {
ipcMain.on(`fileManagementAddFiles-${this.type}`, (event, arg: string) => {
let currentPath = pathModule.join(appConfigService.fileManagement[`${this.type}Root`], arg);
let arrayOfFiles = dialog.showOpenDialog({
title: `Add files to ${this.type} folder`,
defaultPath: FileSystemService.getUserHomePath(),
buttonLabel: 'Add',
properties: ['openFile', 'multiSelections']
});
_.forEach(arrayOfFiles, (filePath) => {
let toPath = pathModule.join(currentPath, pathModule.basename(filePath));
FileSystemService.copyFile(filePath, toPath);
});
});
}
private _removeFile() {
ipcMain.on(`fileManagementRemoveFile-${this.type}`, (event, arg: {path: string, fileName: string}) => {
let currentPath = pathModule.join(
appConfigService.fileManagement[`${this.type}Root`], arg.path, arg.fileName);
FileSystemService.unlink(currentPath);
});
}
private _removeDir() {
ipcMain.on(`fileManagementRemoveDir-${this.type}`, (event, arg: {path: string, dirName: string}) => {
let currentPath = pathModule.join(
appConfigService.fileManagement[`${this.type}Root`], arg.path, arg.dirName);
FileSystemService.rmdir(currentPath);
});
}
}
new FileManagementService('encrypt');
new FileManagementService('decrypt');
FileManagementService.init();
|
/*
 * Create the transient table that will be filled with new data during
* CLUSTER, ALTER TABLE, and similar operations. The transient table
* duplicates the logical structure of the OldHeap, but is placed in
* NewTableSpace which might be different from OldHeap's. Also, it's built
* with the specified persistence, which might differ from the original's.
*
 * After this, the caller should load the new heap with transferred/modified
* data, then call finish_heap_swap to complete the operation.
*/
Oid
make_new_heap(Oid OIDOldHeap, Oid NewTableSpace, char relpersistence,
LOCKMODE lockmode)
{
TupleDesc OldHeapDesc;
char NewHeapName[NAMEDATALEN];
Oid OIDNewHeap;
Oid toastid;
Relation OldHeap;
HeapTuple tuple;
Datum reloptions;
bool isNull;
Oid namespaceid;
OldHeap = heap_open(OIDOldHeap, lockmode);
OldHeapDesc = RelationGetDescr(OldHeap);
tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(OIDOldHeap));
if (!HeapTupleIsValid(tuple))
elog(ERROR, "cache lookup failed for relation %u", OIDOldHeap);
reloptions = SysCacheGetAttr(RELOID, tuple, Anum_pg_class_reloptions,
&isNull);
if (isNull)
reloptions = (Datum) 0;
if (relpersistence == RELPERSISTENCE_TEMP)
namespaceid = LookupCreationNamespace("pg_temp");
else
namespaceid = RelationGetNamespace(OldHeap);
snprintf(NewHeapName, sizeof(NewHeapName), "pg_temp_%u", OIDOldHeap);
OIDNewHeap = heap_create_with_catalog(NewHeapName,
namespaceid,
NewTableSpace,
InvalidOid,
InvalidOid,
InvalidOid,
OldHeap->rd_rel->relowner,
OldHeapDesc,
NIL,
RELKIND_RELATION,
relpersistence,
false,
RelationIsMapped(OldHeap),
true,
0,
ONCOMMIT_NOOP,
reloptions,
false,
true,
true,
NULL);
Assert(OIDNewHeap != InvalidOid);
ReleaseSysCache(tuple);
CommandCounterIncrement();
toastid = OldHeap->rd_rel->reltoastrelid;
if (OidIsValid(toastid))
{
tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(toastid));
if (!HeapTupleIsValid(tuple))
elog(ERROR, "cache lookup failed for relation %u", toastid);
reloptions = SysCacheGetAttr(RELOID, tuple, Anum_pg_class_reloptions,
&isNull);
if (isNull)
reloptions = (Datum) 0;
NewHeapCreateToastTable(OIDNewHeap, reloptions, lockmode);
ReleaseSysCache(tuple);
}
heap_close(OldHeap, NoLock);
return OIDNewHeap;
}
|
Update (March 25): Sony has confirmed that the version 2.5 PS4 firmware will be available for download tomorrow, March 26. In addition to the features discussed below, Sony announced the update will also bring the ability to back up the PS4's internal hard drive to an external USB disk, to find PS4 friends through a Facebook account connection, and to upload recorded videos to DailyMotion in addition to other options. More details are available on the PlayStation Blog.
Original Story (March 12)
Way back in February 2013, Sony's Mark Cerny promised that the PlayStation 4 would include a handy suspend and quick-resume feature. The idea was to pause a game, then leave the console in an "idle mode" that kept the game state in memory, allowing players to pick up later from that exact point with no loading required. Then, just before the system's November 2013 launch, Sony announced that the feature wouldn't be ready in time for the system's North American release.
Nearly 17 months after that launch, Sony is finally ready to make good on its promise. The resume/suspend feature is part of the system upgrades planned for the version 2.50 firmware update, which Sony said will be available "soon." The long-delayed feature will be "supported by nearly all PS4 titles," Sony Director of Product Planning & Software Innovation Scott McCarthy said in an announcement post for the upcoming firmware, dubbed "Yukimura" inside Sony. Microsoft has had a similar "quick startup" feature since launch, as demonstrated in the comparison video on this page.
Elsewhere in the update announcement, Sony also discusses new system-wide accessibility options that will be welcome news to those with certain disabilities. Those with vision limitations will be able to take advantage of "text to speech, enlarged text, bolder fonts, higher contrast UI, zoom for displayed pictures, inverted colors on screen," across the platform. The update will also include a system-level button configuration utility, letting players reassign functions from shoulder buttons to face buttons across games, for instance, and "making it easier for users with limited manual dexterity or limited reach and strength to play."
The update also comes with a slight upgrade to streaming gameplay through the Remote Play and Share Play features. That streaming will now be available at a maximum of 60fps, up from a 30fps cap previously. That's a nice upgrade, but you're going to need an extremely healthy Internet connection to take full advantage of it based on our tests.
One final relevant new feature in the upcoming update: the system will now automatically take a screenshot when you earn an in-game trophy. That's sure to help satisfy all those "pics or it didn't happen" jerks on your social media feeds.
|
Armada FC Public Relations | July 22, 2015
Less than a year into their first season at Community First Park at the Baseball Grounds of Jacksonville, the Jacksonville Armada FC have managed to create a unique atmosphere in Downtown Jacksonville.
Whether it is the 30-foot-high kraken known as Squid Vicious lurking around the outfield seating, the firing of the cannons to signify an Armada goal or the roar of Section 904, it would be hard to deny it is an infectious atmosphere.
And it is clear the hometown “boys in blue” feed off that excitement. The evidence is in the results.
The Armada FC have managed to earn points in four of five matches at their home. And each of those matches has included thrilling moments and nail-biting finishes, like Keita’s stoppage-time strike against the San Antonio Scorpions and this past weekend’s stout defensive effort to stave off the previously undefeated New York Cosmos for a hard-earned victory.
Perhaps no one quite appreciates the immense support as much as hometown hero and Englewood High School and Jacksonville University graduate Tommy Krizanovic, who proudly scored the game winning goal to top the NASL Spring Season Champions Cosmos and send Community First Park into a frenzy.
“Considering that it is the team’s first year of existence, it is impressive,” said Krizanovic. “With the crowd behind us the way they have been, it almost seems like we have been around forever. It makes playing at home feel so much easier. It gives us so much more motivation, and I think that makes a big difference in how we play at home versus how we play on the road. We want to take that with us when we travel.”
Krizanovic knows that the faithful supporters will continue to make a difference as the team moves forward into the fall season.
“(Support at home) will be crucial, and it will have a big impact on the team,” he said. “I think our fans are really stepping up their game. It is like having that 12th man. It plays a big role for the club.”
Armada head coach Guillermo Hoyos sees the culture of the club progressing into a warm and devoted environment that is certain to produce more exciting results.
“They are a huge influence,” said Hoyos. “The team really feels protected. All humans move through emotion and feeling support. When we get down, protection like that is exactly what we need. It gives us confidence. It also helps us battle back from those defeats.”
Hoyos’ philosophy is something that involves the whole community. He sees a vibrant future for Jacksonville to be proud of.
“Coming out and winning like we did last weekend really defines the type of team we are building and club that we are,” he said. “So that support is obviously something very good for the club and maturity of the team. We work hard every single day, and sometimes we don’t get the results that we want to see. But if we continue to work hard, victory will be all of ours to celebrate soon.”
|
def remove_data(self, raise_error=False):
path = Path(self.file_path)
if path.exists():
if path.is_dir():
shutil.rmtree(path, ignore_errors=True)
elif path.is_file():
path.unlink()
_, bucket = _s3_bucket_instance(self.bucket)
relative_path = path.relative_to(self.prefix)
try:
bucket.delete_objects(
Delete=dict(
Objects=[
dict(Key=str(relative_path))
]
)
)
logging.info(f'Entry {str(relative_path)} removed from {self.bucket}')
except Exception as e:
logging.error(f'Cannot remove {str(relative_path)} - {str(e)}')
if raise_error:
raise e
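The method above is an excerpt: it relies on module-level imports (pathlib.Path, shutil, logging) and on a private helper _s3_bucket_instance that is not shown here. The sketch below is only a guess at what such a helper might look like using boto3; the real implementation may differ.
import boto3

def _s3_bucket_instance(bucket_name):
    # Hypothetical helper: return the S3 service resource and a Bucket handle.
    # remove_data() only uses the second element of the returned tuple.
    s3 = boto3.resource("s3")
    return s3, s3.Bucket(bucket_name)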
|
use std::fmt;
pub use std::io::ErrorKind;
#[derive(Clone, Debug)]
pub struct StdinError {
pub kind: ErrorKind,
}
impl StdinError {
pub fn new(kind: ErrorKind) -> StdinError {
StdinError { kind }
}
}
pub fn to_err(e: StdinError) -> super::Error {
use super::{Error, ErrorKind};
Error::new(ErrorKind::StdinError(e))
}
impl fmt::Display for StdinError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use self::ErrorKind::*;
match self.kind {
NotFound => panic!("Invalid error"),
PermissionDenied => panic!("Invalid error"),
ConnectionRefused => panic!("Invalid error"),
ConnectionReset => panic!("Invalid error"),
ConnectionAborted => panic!("Invalid error"),
NotConnected => panic!("Invalid error"),
AddrInUse => panic!("Invalid error"),
AddrNotAvailable => panic!("Invalid error"),
BrokenPipe => panic!("Invalid error"),
AlreadyExists => panic!("Invalid error"),
WouldBlock => panic!("Invalid error"),
InvalidInput => panic!("Invalid parameters"),
InvalidData => panic!("Invalid parameters"),
TimedOut => write!(f, "stdin operation timed out"),
WriteZero => panic!("Invalid error"),
Interrupted => write!(f, "stdin operation interrupted"),
Other => write!(f, "stdin unknown error"),
UnexpectedEof => write!(f, "stdin unexpected EOF"),
_ => write!(f, "Unknown error"),
}
}
}
|
/**
* A collection of helpful utility methods to read data from Kepler actors'
* ports.
*/
public final class PortHelper {
/**
* Read string from port, throw {@link IllegalActionException} in case it
* is empty.
*
* @param port Actor's port to read data from.
* @return String value of a {@link StringToken} in a port.
* @throws IllegalActionException If port lacks any tokens.
*/
public static String readStringMandatory(final TypedIOPort port)
throws IllegalActionException {
if (port.getWidth() > 0) {
return ((StringToken) port.get(0)).stringValue();
}
final String message = Messages.format("missing.data.on.port.0", port);
throw new IllegalActionException(port, message);
}
/**
* Read string from port, return a default "" value in case it is empty.
*
* @param port Actor's port to read data from.
* @return String value of a {@link StringToken} in a port.
* @throws IllegalActionException If reading from port fails.
*/
public static String readStringOptional(final TypedIOPort port)
throws IllegalActionException {
if (port.getWidth() > 0) {
return ((StringToken) port.get(0)).stringValue();
}
return "";
}
/**
* Read array of strings from port, throw {@link IllegalActionException}
* if port is empty.
*
* @param port Port to read data from.
* @return A list of strings made from {@link StringToken} inside of actor's
* port.
* @throws IllegalActionException If port is empty.
*/
public static List<String> readStringArrayMandatory(final TypedIOPort port)
throws IllegalActionException {
if (port.getWidth() > 0) {
return PortHelper.readStringArray(port);
}
throw new IllegalActionException(port, Messages.format(
"missing.data.on.port.0", port));
}
/**
* Read array of strings from port, return {@link Collections#emptyList}
* if port is empty.
*
* @param port Port to read data from.
* @return A list of strings made from {@link StringToken} inside of actor's
* port.
* @throws IllegalActionException If reading from port fails.
*/
public static List<String> readStringArrayOptional(final TypedIOPort port)
throws IllegalActionException {
if (port.getWidth() > 0) {
return PortHelper.readStringArray(port);
}
return Collections.emptyList();
}
/**
* Read array of strings from port. This private method assumes the port
* contains some data. To decide what to do if that is not true, refer to
* {@link PortHelper#readStringArrayMandatory(TypedIOPort)} or
* {@link PortHelper#readStringArrayOptional(TypedIOPort)}.
*
* @param port Port to read data from.
* @return A list of strings made from {@link StringToken} inside of actor's
* port.
* @throws IllegalActionException If reading from port fails.
*/
private static List<String> readStringArray(final TypedIOPort port)
throws IllegalActionException {
final ArrayToken tokenArray = (ArrayToken) port.get(0);
final int length = tokenArray.length();
final List<String> stringList = new ArrayList<>(length);
for (int i = 0; i < length; i++) {
final String arrayElement =
((StringToken) tokenArray.getElement(i)).stringValue();
stringList.add(arrayElement);
}
return stringList;
}
/**
 * Create a singleton attribute for each given port, which will make Kepler
 * render the name of the port directly on the workflow.
*
* @param ports Ports whose names should be visible in Kepler.
* @throws NameDuplicationException If the name is duplicated in Kepler
* engine.
* @throws IllegalActionException If there was a problem from Kepler
* engine.
*/
public static void makePortNameVisible(final TypedIOPort... ports)
throws NameDuplicationException, IllegalActionException {
for (final TypedIOPort port : ports) {
new SingletonAttribute(port, "_showName"); //NON-NLS
}
}
private PortHelper() {
super();
}
}
|
<filename>net.violet.platform/src/main/java/net/violet/platform/util/concurrent/units/AbstractCrawlerProcessUnit.java
package net.violet.platform.util.concurrent.units;
import net.violet.common.utils.concurrent.ThreadWatcher;
import net.violet.common.utils.concurrent.units.AbstractProcessUnit;
import net.violet.platform.datamodel.Files;
/**
 * Base class for the crawler ProcessUnits used throughout the project
*/
public abstract class AbstractCrawlerProcessUnit<C> extends AbstractProcessUnit<String, C, Files> {
private final String id_xml;
private final String title;
private final String link;
private final Integer mTTL;
private ThreadWatcher mThreadWatcher;
/**
 * @param inMaterial
 * @param inCondition
 * @param id_xml
 * @param title
 * @param link
 * @param inTTL
*/
public AbstractCrawlerProcessUnit(String inMaterial, C inCondition, String id_xml, String title, String link, Integer inTTL) {
super(inMaterial, inCondition, null);
this.id_xml = id_xml;
this.title = title;
this.link = link;
this.mTTL = inTTL;
}
/**
* @return the id_xml
*/
public String getId_xml() {
return this.id_xml;
}
/**
* @return the title
*/
public String getTitle() {
return this.title;
}
/**
* @return the link
*/
public String getLink() {
return this.link;
}
public void setThreadWatcher(ThreadWatcher inWatcher) {
this.mThreadWatcher = inWatcher;
}
@Override
protected ThreadWatcher getThreadWatcher() {
return this.mThreadWatcher;
}
@Override
public Files getResult() {
return super.getResult();
}
public Integer getTTL() {
return this.mTTL;
}
}
|
/// Extend a node span to cover its attributes. (By default, item spans cover only the item body,
/// not the preceding attrs.)
pub fn extended_span(mut s: Span, attrs: &[Attribute]) -> Span {
// Extend `s` backward to cover all the attrs
for attr in attrs {
// Not sure these checks are exactly right, but it seems to work for now.
if attr.span.ctxt() == s.ctxt() && attr.span.lo() < s.lo() {
s = s.with_lo(attr.span.lo());
}
}
s
}
|
<filename>py3cw/test_request.py
import pytest
from .request import Py3CW
def test_error_missing_key():
with pytest.raises(ValueError) as excinfo:
assert Py3CW(
key='',
secret='secret'
)
excinfo.match(r'Missing key')
def test_error_missing_secret():
with pytest.raises(ValueError) as excinfo:
assert Py3CW(
key='key',
secret=''
)
excinfo.match(r'Missing secret')
def test_error_missing_entity():
p3cw = Py3CW(
key='key',
secret='secret'
)
with pytest.raises(ValueError) as excinfo:
assert p3cw.request('', '')
excinfo.match(r'Missing entity')
def test_error_invalid_entity():
p3cw = Py3CW(
key='key',
secret='secret'
)
with pytest.raises(ValueError) as excinfo:
assert p3cw.request('test', '')
excinfo.match(r'Invalid entity')
def test_error_invalid_action():
p3cw = Py3CW(
key='key',
secret='secret'
)
with pytest.raises(ValueError) as excinfo:
assert p3cw.request('smart_trades', 'test')
excinfo.match(r'Invalid action')
def test_error_missing_id():
p3cw = Py3CW(
key='key',
secret='secret'
)
with pytest.raises(ValueError) as excinfo:
assert p3cw.request('smart_trades', 'step_panic_sell')
excinfo.match(r'Missing id')
with pytest.raises(ValueError) as excinfo:
assert p3cw.request('smart_trades', 'step_panic_sell', '')
excinfo.match(r'Missing id')
|
/** Create an association between source and target types */
public static Association createAssociation (Classifier source, Classifier target, RefOntoUML.Package container)
{
Association assoc = factory.createAssociation();
createAssociationEnds(assoc, source, target);
if(container!=null)container.getPackagedElement().add(assoc);
return assoc;
}
|
/**
* Mark index we're sending out this round as sent + update metadata as necessary.
*
 * @param curNode Node whose send index is being confirmed
 * @param graph Local graph to operate on
 * @param roundNumber current round number
 * @param dga Distributed accumulator for determining if work was done in
 * an iteration across all hosts
*/
inline void ConfirmMessageToSendOp(GNode curNode, Graph& graph, const uint32_t roundNumber,
galois::DGAccumulator<uint32_t>& dga) {
NodeData& cur_data = graph.getData(curNode);
if (cur_data.roundIndexToSend != infinity) {
cur_data.dTree.markSent(roundNumber);
}
}
|
On or about February 24, 1848, a twenty-three-page pamphlet was published in London. Modern industry, it proclaimed, had revolutionized the world. It surpassed, in its accomplishments, all the great civilizations of the past—the Egyptian pyramids, the Roman aqueducts, the Gothic cathedrals. Its innovations—the railroad, the steamship, the telegraph—had unleashed fantastic productive forces. In the name of free trade, it had knocked down national boundaries, lowered prices, made the planet interdependent and cosmopolitan. Goods and ideas now circulated everywhere. Just as important, it swept away all the old hierarchies and mystifications. People no longer believed that ancestry or religion determined their status in life. Everyone was the same as everyone else. For the first time in history, men and women could see, without illusions, where they stood in their relations with others. The new modes of production, communication, and distribution had also created enormous wealth. But there was a problem. The wealth was not equally distributed. Ten per cent of the population possessed virtually all of the property; the other ninety per cent owned nothing. As cities and towns industrialized, as wealth became more concentrated, and as the rich got richer, the middle class began sinking to the level of the working class. Soon, in fact, there would be just two types of people in the world: the people who owned property and the people who sold their labor to them. As ideologies disappeared which had once made inequality appear natural and ordained, it was inevitable that workers everywhere would see the system for what it was, and would rise up and overthrow it. The writer who made this prediction was, of course, Karl Marx, and the pamphlet was “The Communist Manifesto.” He is not wrong yet. Considering his rather glaring relevance to contemporary politics, it’s striking that two important recent books about Marx are committed to returning him to his own century. “Marx was not our contemporary,” Jonathan Sperber insists, in “Karl Marx: A Nineteenth-Century Life” (Liveright), which came out in 2013; he is “more a figure of the past than a prophet of the present.” And Gareth Stedman Jones explains that the aim of his new book, “Karl Marx: Greatness and Illusion” (Harvard), is “to put Marx back in his nineteenth-century surroundings.” The mission is worthy. Historicizing—correcting for the tendency to presentize the past—is what scholars do. Sperber, who teaches at the University of Missouri, and Stedman Jones, who teaches at Queen Mary University of London and co-directs the Centre for History and Economics at the University of Cambridge, both bring exceptional learning to the business of rooting Marx in the intellectual and political life of nineteenth-century Europe. Marx was one of the great infighters of all time, and a lot of his writing was topical and ad hominem—no-holds-barred disputes with thinkers now obscure and intricate interpretations of events largely forgotten. Sperber and Stedman Jones both show that if you read Marx in that context, as a man engaged in endless internecine political and philosophical warfare, then the import of some familiar passages in his writings can shrink a little. The stakes seem more parochial. In the end, their Marx isn’t radically different from the received Marx, but he is more Victorian. Interestingly, given the similarity of their approaches, there is not much overlap. Still, Marx was also what Michel Foucault called the founder of a discourse. 
An enormous body of thought is named after him. “I am not a Marxist,” Marx is said to have said, and it’s appropriate to distinguish what he intended from the uses other people made of his writings. But a lot of the significance of the work lies in its downstream effects. However he managed it, and despite the fact that, as Sperber and Stedman Jones demonstrate, he can look, on some level, like just one more nineteenth-century system-builder who was convinced he knew how it was all going to turn out, Marx produced works that retained their intellectual firepower over time. Even today, “The Communist Manifesto” is like a bomb about to go off in your hands. And, unlike many nineteenth-century critics of industrial capitalism—and there were a lot of them—Marx was a true revolutionary. All of his work was written in the service of the revolution that he predicted in “The Communist Manifesto” and that he was certain would come to pass. After his death, communist revolutions did come to pass—not exactly where or how he imagined they would but, nevertheless, in his name. By the middle of the twentieth century, more than a third of the people in the world were living under regimes that called themselves, and genuinely believed themselves to be, Marxist. This matters because one of Marx’s key principles was that theory must always be united with practice. That’s the point of the famous eleventh thesis on Feuerbach: “Philosophers have hitherto only interpreted the world in various ways; the point is to change it.” Marx was not saying that philosophy is irrelevant; he was saying that philosophical problems arise out of real-life conditions, and they can be solved only by changing those conditions—by remaking the world. And Marx’s ideas were used to remake the world, or a big portion of it. Although no one would hold him responsible, in a juridical sense, for the outcome, on Marx’s own principle the outcome tells us something about the ideas. In short, you can put Marx back into the nineteenth century, but you can’t keep him there. He wasted a ridiculous amount of his time feuding with rivals and putting out sectarian brush fires, and he did not even come close to completing the work he intended as his magnum opus, “Capital.” But, for better or for worse, it just is not the case that his thought is obsolete. He saw that modern free-market economies, left to their own devices, produce gross inequalities, and he transformed a mode of analysis that goes all the way back to Socrates—turning concepts that we think we understand and take for granted inside out—into a resource for grasping the social and economic conditions of our own lives.
Apart from his loyal and lifelong collaborator, Friedrich Engels, almost no one would have guessed, in 1883, the year Marx died, at the age of sixty-four, how influential he would become. Eleven people showed up for the funeral. For most of his career, Marx was a star in a tiny constellation of radical exiles and failed revolutionaries (and the censors and police spies who monitored them) but almost unknown outside it. The books he is famous for today were not exactly best-sellers. “The Communist Manifesto” vanished almost as soon as it was published and remained largely out of print for twenty-four years; “Capital” was widely ignored when the first volume came out, in 1867. After four years, it had sold a thousand copies, and it was not translated into English until 1886. The second and third volumes of “Capital” were published after Marx died, stitched together by Engels from hundreds of pages of scrawled-over drafts. (Marx had spectacularly bad handwriting; Engels was one of the few people outside the family who could decipher it.) The “Theses on Feuerbach,” which Marx wrote in 1845, were not discovered until 1888, when Engels published them, and some of the texts most important for twentieth-century Marxists—the cobbled-together volume known as “The German Ideology,” the so-called Paris manuscripts of 1844, and the book entitled the “Grundrisse” by its Soviet editors—were unknown until after 1920. The unfinished Paris manuscripts, a holy text in the nineteen-sixties, did not appear in English until 1959. Marx seems to have regarded none of that material as publishable. In Marx’s own lifetime, the work that finally brought him attention outside his circle was a thirty-five-page item called “The Civil War in France,” published in 1871, in which he hailed the short-lived and violently suppressed Paris Commune as “the glorious harbinger of a new”—that is, communist—“society.” It’s not a text that is cited much today. One reason for Marx’s relative obscurity is that only toward the end of his life did movements to improve conditions for workers begin making gains in Europe and the United States. To the extent that those movements were reformist rather than revolutionary, they were not Marxist (although Marx did, in later years, speculate about the possibility of a peaceful transition to communism). With the growth of the labor movement came excitement about socialist thought and, with that, an interest in Marx. Still, as Alan Ryan writes in his characteristically lucid and concise introduction to Marx’s political thought, “Karl Marx: Revolutionary and Utopian” (Liveright), if Vladimir Lenin had not arrived in Petrograd in 1917 and taken charge of the Russian Revolution, Marx would probably be known today as “a not very important nineteenth-century philosopher, sociologist, economist, and political theorist.” The Russian Revolution made the world take Marx’s criticism of capitalism seriously. After 1917, communism was no longer a utopian fantasy.
Marx is a warning about what can happen when people defy their parents and get a Ph.D. Marx’s father, a lawyer in the small city of Trier, in western Germany, had tried to steer him into the law, but Marx chose philosophy. He studied at the Friedrich-Wilhelms-Universität, where Hegel once taught, and he became involved with a group of intellectuals known as the Young Hegelians. Hegel was cautious about criticizing religion and the Prussian state; the Young Hegelians were not, and, just as Marx was being awarded his degree, in 1841, there was an official crackdown. Marx’s mentor was fired, and the Young Hegelians became academic pariahs. So Marx did what many unemployed Ph.D.s do: he went into journalism. Apart from a few small book advances, journalism was Marx’s only source of earned income. (There is a story, though Sperber considers it unsubstantiated, that once, in desperation, he applied for a job as a railway clerk and was turned down for bad handwriting.) In the eighteen-forties, Marx edited and contributed to political newspapers in Europe; from 1852 to 1862, he wrote a column for the New York Daily Tribune, the paper with the largest circulation in the world at the time. When journalistic work dried up, he struggled. He depended frequently on support from Engels and advances on his inheritance. He was sometimes desperate for food; at one point, he couldn’t leave the house because he had pawned his only coat. The claim that the author of “Capital” was financially inept, and that he and his wife wasted what little money came their way on middle-class amenities like music and drawing lessons for the children, became a standard “irony” in Marx biographies. Sperber contests this. Marx had less money to waste than historians have assumed, and he accepted poverty as the price of his politics. He would gladly have lived in a slum himself, but he didn’t want his family to suffer. Three of the Marxes’ children died young and a fourth was stillborn; poverty and substandard living conditions may have been factors. Marx’s journalism made him into a serial exile. He wrote and published articles offensive to the authorities, and, in 1843, he was kicked out of Cologne, where he was helping run a paper called Rheinische Zeitung. He went to Paris, which had a large German community, and that is where he and Engels became friends. An earlier encounter in Cologne had not gone well, but they met again at the Café de la Régence, in 1844, and ended up spending ten days together talking. Engels, who was two years younger, had the same politics as Marx. Soon after they met, he wrote his classic study “The Condition of the Working Class in England,” which ends by predicting a communist revolution. Engels’s father was a German industrialist in the textile business, an owner of factories in Barmen and Bremen and in Manchester, England, and although he disapproved of his son’s politics and the company he kept, he gave him a position at the Manchester factory. Engels hated the work, but he was good at it, as he was at most things. He went fox hunting with the gentry he despised, and made fun of Marx’s attempts to ride a horse. Engels eventually became a partner, and the income helped him keep Marx alive. In 1845, Marx was expelled from France. He moved to Brussels. Three years later, though, something happened that almost no one had foreseen: revolutions broke out across Europe, including in France, Italy, Germany, and the Austrian Empire. 
Marx wrote “The Communist Manifesto” just as those uprisings were getting under way. When unrest reached Brussels, he was suspected of arming insurgents and was evicted from Belgium, but he returned to Paris. Rioters there had broken into the Tuileries and set the French throne on fire. By the year’s end, most of the revolutions had been crushed by monarchist forces. Many people who were or would become important figures in European art and literature—Wagner, Dostoyevsky, Baudelaire, Turgenev, Berlioz, Delacroix, Liszt, George Sand—had been caught up in the revolutionary excitement, and the outcome led to a crisis of faith in politics (the subject of Flaubert’s novel “Sentimental Education”). The failure of the 1848 revolutions is what Marx’s line “the first time as tragedy, the second time as farce” refers to. (He got the phrase from Engels.) The “tragedy” was the fate of the French Revolution under Napoleon; the “farce” was the election of Napoleon’s nephew, Louis-Napoleon Bonaparte, whom Marx considered a nonentity, to the Presidency of France, in December, 1848. Bonaparte eventually declared himself Emperor and ruled until 1870, when France lost a war with Prussia. The Paris Commune was a by-product of that war. So in 1849 Marx was forced into exile once again. He fled with his family to London. He assumed that the stay would be temporary, but he lived there for the rest of his life. That is where, day after day in the Reading Room of the British Museum, he did the research for “Capital,” and it is where, in Highgate Cemetery, he is buried. The impressive bronze bust you see on his tombstone today was placed there, in 1956, by the Communist Party of Great Britain.
What was Marx like? The number of first-person reports is not large, but they tend to agree. He was, in some respects, a caricature of the German academic (which he had once expected to become): an imperious know-it-all with untamed hair in a misbuttoned frock coat. He once described himself to one of his children as “a machine condemned to devour books and then throw them, in a changed form, on the dunghill of history.” He wrote all night in clouds of tobacco smoke, books and papers piled around him. “They are my slaves,” he said, “and they must serve me as I please.” In professional matters, he was forbidding. He was a cogent speaker but had a lisp and was a poor orator; he knew it, and rarely addressed a crowd. He was ruthless in print, made enemies of many friends and former allies, and did not suffer fools—a large subset of his acquaintance, in his view. One German exile referred to him as “an intellectual customs agent and border guard, appointed on his own authority.” Still, he commanded respect. A colleague, recalling Marx at twenty-eight, described him as “a born leader of the people.” He was actually good at running the show—as an editor and, later on, as the dominant figure in the International Workingmen’s Association, known as the First International. His hair was black; his eyes were black; his complexion was swarthy. Engels called him the “black fellow from Trier”; his wife and children called him the Moor. In private, he was modest and gracious. When he was not sick—he had a bad liver, suffered from bronchitis, and grew fist-size boils, which Sperber thinks were caused by an autoimmune disorder but which may have been a symptom of his liver disease—he was playful and affectionate. He loved Shakespeare, made up stories for his three daughters, and enjoyed cheap cigars and red wine. His wife and daughters adored him. A Prussian government spy who visited Marx at his home in 1852 was surprised to find him “the gentlest and mildest of men.” He became engaged to Jenny von Westphalen, also from Trier, when he was eighteen and she was twenty-two. Sperber thinks that a fairy tale has grown up about the marriage, but Jenny is said to have been exceptionally beautiful, and she was devoted to Karl. He wrote passionate love poetry for her. The engagement lasted seven years, during which he finished his studies, and they rarely saw each other. The relationship was mainly epistolary. (Sperber believes that they had premarital sex. I certainly hope so.) In her letters, Jenny calls Karl her “little wild boar.” The one possible flaw in the domestic idyll has to do with a child born to their servant, Helene Demuth. She was a “gift” to the Marxes from Jenny’s mother and lived with the family. (Almost all women in nineteenth-century Britain who could manage to retain a servant did so. Even Miss Bates, in Jane Austen’s “Emma,” who lives on the charity of her well-off neighbors, has a servant.) Helene’s child, named Frederick and called Freddy, was born in 1851 and was brought up by foster parents. Marx’s daughters didn’t meet him until after Marx’s death. Engels claimed paternity. This was not implausible. Engels was unmarried and had a taste for working-class women; his longtime lover, Mary Burns, worked in a Manchester factory. On his deathbed, though, forty-four years later, he is supposed to have named Marx as Freddy’s real father, information that became known in Communist circles but was not made public until 1962. 
Sperber and Stedman Jones accept the story, as does the author of the standard English-language biography, David McLellan, although one of Engels’s biographers, Terrell Carver, thinks that the evidence is not conclusive. Demuth remained with the family; after Marx’s death, she went to work for Engels. And the Marxes’ marriage survived.
It is sympathy for Marx that leads Sperber and Stedman Jones to insist that we read him in his nineteenth-century context, because they hope to distance him from the interpretation of his work made after his death by people like Karl Kautsky, who was his chief German-language exponent; Georgi Plekhanov, his chief Russian exponent; and, most influentially, Engels. It was thanks mainly to those writers that people started to refer to Marxism as “scientific socialism,” a phrase that sums up what was most frightening about twentieth-century Communism: the idea that human beings can be reëngineered in accordance with a theory that presents itself as a law of history. The word the twentieth century coined for that was totalitarianism. So, by 1939, when the British philosopher Isaiah Berlin published his widely read and not wholly unadmiring study “Karl Marx: His Life and Environment” (still in print), he could describe Marx as “among the great authoritarian founders of new faiths, ruthless subverters and innovators who interpret the world in terms of a single, clear, passionately held principle, denouncing and destroying all that conflicts with it. His faith . . . was of that boundless, absolute kind which puts an end to all questions and dissolves all difficulties.” This became the Cold War Marx. It’s true that Marx was highly doctrinaire, something that did not wear well with his compatriots in the nineteenth century, and that certainly does not wear well today, after the experience of the regimes conceived in his name. It therefore sounds perverse to say that Marx’s philosophy was dedicated to human freedom. But it was. Marx was an Enlightenment thinker: he wanted a world that is rational and transparent, and in which human beings have been liberated from the control of external forces. This was the essence of Marx’s Hegelianism. Hegel argued that history was the progress of humanity toward true freedom, by which he meant self-mastery and self-understanding, seeing the world without illusions—illusions that we ourselves have created. The Young Hegelians’ controversial example of this was the Christian God. (This is what Feuerbach wrote about.) We created God, and then pretended that God created us. We hypostatized our own concept and turned it into something “out there” whose commandments (which we made up) we struggle to understand and obey. We are supplicants to our own fiction. Concepts like God are not errors. History is rational: we make the world the way we do for a reason. We invented God because God solved certain problems for us. But, once a concept begins impeding our progress toward self-mastery, it must be criticized and transcended, left behind. Otherwise, like the members of the Islamic State today, we become the tools of our Tool. What makes it hard to discard the tools we have objectified is the persistence of the ideologies that justify them, and which make what is only a human invention seem like “the way things are.” Undoing ideologies is the task of philosophy. Marx was a philosopher. The subtitle of “Capital” is “Critique of Political Economy.” The uncompleted book was intended to be a criticism of the economic concepts that make social relations in a free-market economy seem natural and inevitable, in the same way that concepts like the great chain of being and the divine right of kings once made the social relations of feudalism seem natural and inevitable. 
The reason that “Capital” looks more like a work of economics than like a work of philosophy—the reason that it is filled with tables and charts rather than with syllogisms—is the reason given in the eleventh thesis on Feuerbach: the purpose of philosophy is to understand conditions in order to change them. Marx liked to say that when he read Hegel he found philosophy standing on its head, so he turned it over and placed it on its feet. Life is doing, not thinking. It is not enough to be the masters of our armchairs. Marx thought that industrial capitalism, too, was created for a good reason: to increase economic output—something that “The Communist Manifesto” celebrates. The cost, however, is a system in which one class of human beings, the property owners (in Marxian terms, the bourgeoisie), exploits another class, the workers (the proletariat). Capitalists don’t do this because they are greedy or cruel (though one could describe their behavior that way, as Marx almost invariably did). They do it because competition demands it. That’s how the system operates. Industrial capitalism is a Frankenstein’s monster that threatens its own creators, a system that we constructed for our own purposes and is now controlling us. Marx was a humanist. He believed that we are beings who transform the world around us in order to produce objects for the benefit of all. That is our essence as a species. A system that transforms this activity into “labor” that is bought and used to aggrandize others is an obstacle to the full realization of our humanity. Capitalism is fated to self-destruct, just as all previous economic systems have self-destructed. The working-class revolution will lead to the final stage of history: communism, which, Marx wrote, “is the solution to the riddle of history and knows itself as this solution.” Marx was fanatically committed to finding empirical corroboration for his theory. That’s what it meant to put philosophy on its feet. And that’s why he spent all those hours alone in the British Museum, studying reports on factory conditions, data on industrial production, statistics about international trade. It was a heroic attempt to show that reality aligned with theory. No wonder he couldn’t finish his book. Marx had very little to say about how the business of life would be conducted in a communist society, and this turned out to be a serious problem for regimes trying to put communism into practice. He had reasons for being vague. He thought that our concepts, values, and beliefs all arise out of the conditions of our own time, which means that it’s hard to know what lies on the other side of historical change. In theory, after the revolution, everything will be “up for grabs”—which has been the great dream of leftist radicalism ever since. Marx was clearer about what a communist society would not have. There would be no class system, no private property, no individual rights (which Marx thought boil down to protecting the right of the owners of property to hang on to it), and no state (which he called “a committee for managing the common affairs of the whole bourgeoisie”). The state, in the form of the Party, proved to be one bourgeois concept that twentieth-century Communist regimes found impossible to transcend. Communism is not a religion; it truly is, as anti-Communists used to say about it, godless.
But the Party functions in the way that Feuerbach said God functions in Christianity, as a mysterious and implacable external power. Marx did not, however, provide much guidance for how a society would operate without property or classes or a state. A good example of the problem is his criticism of the division of labor. In the first chapter of “The Wealth of Nations,” in 1776, Adam Smith identified the division of labor—that is, specialization—as the key to economic growth. Smith’s case study was the manufacture of pins. Rather than have a single worker make one pin at a time, Smith argued, a pin factory can split the job into eighteen separate operations, starting with drawing out the wire and ending with the packaging, and increase production by a factor of thousands. To us, this seems an obviously efficient way to organize work, from automobile assembly lines to “knowledge production” in universities. But Marx considered the division of labor one of the evils of modern life. (So did Hegel.) It makes workers cogs in a machine and deprives them of any connection with the product of their labor. “Man’s own deed becomes an alien power opposed to him, which enslaves him instead of being controlled by him,” as Marx put it. In a communist society, he wrote, “nobody has one exclusive sphere of activity but each can become accomplished in any branch he wishes.” It will be possible “to hunt in the morning, fish in the afternoon, rear cattle in the evening, criticize after dinner . . . without ever becoming hunter, fisherman, herdsman, or critic.” This often quoted passage sounds fanciful, but it is at the heart of Marx’s thought. Human beings are naturally creative and sociable. A system that treats them as mechanical monads is inhumane. But the question is, How would a society without a division of labor produce sufficient goods to survive? Nobody will want to rear the cattle (or clean the barn); everyone will want to be the critic. (Believe me.) As Marx conceded, capitalism, for all its evils, had created abundance. He seems to have imagined that, somehow, all the features of the capitalist mode of production could be thrown aside and abundance would magically persist.
In 1980, the philosopher Peter Singer published a short book on Marx in which he listed some of Marx’s predictions: the income gap between workers and owners would increase, independent producers would be forced down into the ranks of the proletariat, wages would remain at subsistence levels, the rate of profit would fall, capitalism would collapse, and there would be revolutions in the advanced countries. Singer thought that most of these predictions were “so plainly mistaken” that it was difficult to understand how anyone sympathetic to Marx could defend them. In 2016, it is harder to be dismissive. “Economists today would do well to take inspiration from his example,” Thomas Piketty says about Marx, in the best-seller he published in 2013, “Capital in the Twenty-first Century.” The book did for many twenty-first-century readers what Marx hoped “Capital” might do for nineteenth-century ones. It uses data to show us the real nature of social relations and, by doing that, forces us to rethink concepts that have come to seem natural and inevitable. One of these is the concept of the market, which is often imagined as a self-optimizing mechanism it is a mistake to interfere with, but which in fact, left to itself, continually increases inequality. Another concept, closely related, is meritocracy, which is often imagined as a guarantor of social mobility but which, Piketty argues, serves mainly to make economic winners feel virtuous. Piketty says that for thirty years after 1945 a high rate of growth in the advanced economies was accompanied by a rise in incomes that benefitted all classes. Severe wealth inequality came to seem a thing of the past (which is why, in 1980, people could quite reasonably call Marx’s predictions mistaken). It now appears that those thirty years were an anomaly. The Depression and the two world wars had effectively wiped out the owners of wealth, but the thirty years after 1945 rebooted the economic order. “The very high level of private wealth that has been attained since the nineteen-eighties and nineteen-nineties in the wealthy countries of Europe and in Japan,” Piketty says, “directly reflects the Marxian logic.” Marx was correct that there is nothing naturally egalitarian about modern economies left to themselves. As Piketty puts it, “There is no natural, spontaneous process to prevent destabilizing, inegalitarian forces from prevailing permanently.” The tendency of the system to increase inequality was certainly true in Marx’s own century. By 1900, the richest one per cent of the population in Britain and France owned more than fifty per cent of those nations’ wealth; the top ten per cent owned ninety per cent. We are approaching those levels again today. In the United States, according to the Federal Reserve, the top ten per cent of the population owns seventy-two per cent of the wealth, and the bottom fifty per cent has two per cent. About ten per cent of the national income goes to the top two hundred and forty-seven thousand adults (one-thousandth of the adult population). This is not a problem restricted to the rich nations. Global wealth is also unequally distributed, and by the same ratios or worse. Piketty does not predict a worldwide working-class revolution; he does remark that this level of inequality is “unsustainable.” He can foresee a time when most of the planet is owned by billionaires. Marx was also not wrong about the tendency of workers’ wages to stagnate as income for the owners of capital rises. 
For the first sixty years of the nineteenth century—the period during which he began writing “Capital”—workers’ wages in Britain and France were stuck at close to subsistence levels. It can be difficult now to appreciate the degree of immiseration in the nineteenth-century industrial economy. In one period in 1862, the average workweek in a Manchester factory was eighty-four hours. It appears that wage stagnation is back. After 1945, wages rose as national incomes rose, but the income of the lowest earners peaked in 1969, when the minimum hourly wage in the United States was $1.60. That is the equivalent of $10.49 today, when the national minimum wage is $7.25. And, as wages for service-sector jobs decline in earning power, the hours in the workweek increase, because people are forced to take more than one job.
|
/**
 * Converts an Ozone delegation token to a {@link TokenProto}.
 * @param tok the delegation token to convert; must not be null
 * @return the corresponding {@link TokenProto}
*/
public static TokenProto convertToTokenProto(Token<?> tok) {
if(tok == null){
throw new IllegalArgumentException("Invalid argument: token is null");
}
return TokenProto.newBuilder().
setIdentifier(getByteString(tok.getIdentifier())).
setPassword(getByteString(tok.getPassword())).
setKind(tok.getKind().toString()).
setService(tok.getService().toString()).build();
}
|
/* Approach:
--> We are given the node x that the red player colors first, so the only moves that give blue a chance to win are to color a node adjacent to x: its left child, its right child, or its parent.
--> If blue colors the left child of x, red can never enter the subtree rooted at that left child.
--> The same holds for the right child of x.
--> If blue colors the parent of x, red can never reach the nodes above x, i.e. the rest of the tree outside x's subtree.
--> In every case blue ends up owning exactly one of the three regions (left subtree, right subtree, parent tree) and red gets the other two.
--> Blue can win if and only if the number of nodes in one of these regions is greater than the number of nodes in the other two combined.
*/
class Solution {
public boolean btreeGameWinningMove(TreeNode root, int n, int x) {
TreeNode redColor = findRedColor(root,x);
int leftSubtree = count(redColor.left,null);
int rightSubtree = count(redColor.right,null);
int parentTree = count(root,redColor);
if(leftSubtree> rightSubtree + parentTree)return true;
if(rightSubtree> leftSubtree + parentTree)return true;
if(parentTree> rightSubtree + leftSubtree)return true;
return false;
}
private TreeNode findRedColor(TreeNode root,int x){
if(root==null)return null;
if(root.val==x)return root;
TreeNode l = findRedColor(root.left,x);
if(l!=null)return l;
return findRedColor(root.right,x);
}
private int count(TreeNode root, TreeNode redColor){
if(root==null || root==redColor)return 0;
return 1 + count(root.left,redColor) + count(root.right,redColor);
}
}
|
SACRAMENTO, California (Reuters) - Pellet guns and other toy weaponry should not be allowed to look like the real thing, a California lawmaker said Friday, vowing to introduce a bill to ban such replicas following the accidental killing of a 13-year-old boy by sheriff’s deputies.
The proposal by State Senator Kevin de Leon would require imitation guns and rifles, including those used in civilian mock battle games, to be brightly colored, rather than in dark tones that make them look authentic.
“When officers must make split second decisions on whether or not to use deadly force, these replica firearms can trigger tragic consequences,” said de Leon, a Los Angeles Democrat who tried but failed to get a similar bill through the legislature in 2011.
“By making toy guns more obvious to law enforcement, we can help families avoid the terrible grief of losing a child,” he said.
Citing the accidental killing of teenager Andy Lopez Cruz on October 22, De Leon promised on Friday to reintroduce his bill when the legislative session reopens in January.
The eighth-grader was walking near his home in the wine-country town of Santa Rosa, carrying a pellet rifle he planned to return to a friend when he was shot dead by sheriff’s deputies who mistook the imitation gun for the real thing.
The shooting, by a veteran deputy and firearms instructor, has sparked almost daily protests in California, and the FBI has launched an independent investigation.
The boy’s family has filed a civil rights lawsuit against Erick Gelhaus, 48, the Sonoma County sheriff’s deputy who shot Andy. The suit, which also names the county as a defendant, seeks unspecified damages and alleges the deputy violated the teen’s civil rights.
GUN POLITICS
De Leon’s latest effort to ban toy weapons that could be mistaken for actual firearms comes as California struggles to find a balance between support for stringent gun control rules among majority Democrats in the legislature, and a strong push by Governor Jerry Brown, also a Democrat, to take a more centrist approach.
Last month, Brown vetoed a measure that would ban guns with removable high-capacity ammunition magazines in the state, but signed several others, including one prohibiting kits that proponents say allow owners to convert guns into assault-style weapons.
De Leon’s bill passed the Senate but died in committee in the more conservative Assembly in 2011. Opponents included toy-gun manufacturers and the National Rifle Association. They argued that coloring the guns could create a false impression that they are harmless.
Opponents have also contended that criminals might color their real weapons to confuse police. But Aaron Maguire, a legislative representative for the California State Sheriffs’ Association, noted that lawbreakers could already do that if they wanted to.
De Leon says that this time public opinion is on his side, with the death of Andy Lopez Cruz so fresh in people’s minds.
“The reality is that there are many Andy Lopezes, not just throughout the state but throughout the entire nation,” de Leon said. “What happened in Santa Rosa on October 22 I believe was a watershed moment for the whole country.”
The 2011 bill, which had the support of Los Angeles Police Chief Charlie Beck, was introduced after police in Southern California shot and severely wounded another 13-year-old, Rohayent Gomez, de Leon said.
De Leon said he has not yet spoken to Brown or to Republican colleagues about his proposal.
|
package org.redquark.ramanujan.mustdos;
import static org.junit.jupiter.api.Assertions.assertEquals;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.redquark.ramanujan.mustdos._020InsertANodeAtMiddle.Node;
/**
* @author <NAME>
*
*/
class _020InsertANodeAtMiddleTest {
private _020InsertANodeAtMiddle list;
@BeforeEach
void setUp() throws Exception {
// Initializing the singly linked list
list = new _020InsertANodeAtMiddle();
// Adding some elements in the list
list.add(Integer.valueOf(1));
list.add(Integer.valueOf(2));
list.add(Integer.valueOf(3));
list.add(Integer.valueOf(4));
list.add(Integer.valueOf(5));
list.add(Integer.valueOf(7));
}
@AfterEach
void tearDown() throws Exception {
list = null;
}
/**
* Test method for
* {@link org.redquark.ramanujan.mustdos._020InsertANodeAtMiddle#insert(int)}.
*/
@Test
void testInsert() {
StringBuilder actual = new StringBuilder();
Node head = list.insert(6);
while (head != null) {
actual.append(head.data).append(" ");
head = head.next;
}
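		// add() inserts new elements at the head (adding 1, 2, 3, 4, 5, 7 yields 7 5 4 3 2 1),
		// so inserting 6 at the middle is expected to produce 7 5 4 6 3 2 1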
StringBuilder expected = new StringBuilder();
expected.append("7 5 4 6 3 2 1 ");
assertEquals(expected.toString(), actual.toString());
}
}
|
// parsePage parses a file from src and returns a page.
func (s *Site) parsePage(src string) (*Page, error) {
b, err := os.ReadFile(src)
if err != nil {
return nil, err
}
p := &Page{MetaTags: make(map[string]string), s: s}
c, err := frontmatter.Parse(string(b), p)
if err != nil {
return nil, fmt.Errorf("%s: failed to parse frontmatter: %w", src, err)
}
if s.tpl.Lookup(p.Template) == nil {
return nil, fmt.Errorf("%s: the template %s specified is not defined", src, p.Template)
}
if p.Title == "" || p.Template == "" || p.URI == "" {
return nil, fmt.Errorf("%s: missing required frontmatter parameter (title, template, uri)", src)
}
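	// A URI without an .html suffix is treated as a directory and mapped to its index.html.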
if !strings.HasSuffix(p.URI, ".html") {
p.URI = p.URI + "/index.html"
}
switch filepath.Ext(src) {
case ".html":
p.Content = c
case ".md":
p.Content = string(blackfriday.Run([]byte(c)))
default:
return nil, fmt.Errorf("%s: format does not supported", src)
}
return p, nil
}
|
3D modelling of coupled electromagnetic-mechanical responses in REBCO coils involving tape inhomogeneity
Electromagnetic and mechanical properties are crucial to high-temperature superconducting magnets in high-field applications. In this research, a coupled electromagnetic-mechanical model is constructed to predict the multi-field behavior of REBCO coils during the ramping process, taking into account the strain and magnetic-field dependences of the critical current of coated conductors (CCs). The 3D modelling is used to characterize the longitudinally inhomogeneous critical current of a CC, which also allows coils with local defects to be handled. To verify the reliability of the coupled model, numerical simulations are first compared with experiments on a small REBCO coil, with special attention to the hoop-strain evolution during the magnetization process. On this basis, the coupled model is used to study the influence of local critical-current non-uniformity defined by a Gaussian statistical distribution. The numerical analysis shows that tape inhomogeneity has a clear impact in reducing the critical current of the REBCO coil, and that in the high-field scenario the calculated critical current of the coil is further reduced when the mutual interaction between the electromagnetic and mechanical fields is considered. A detailed comparative study of the screening-current effects in the REBCO coil is then carried out with and without the tape inhomogeneity taken into account. Finally, the effects of the cooling-down process, co-winding materials and local defects are investigated to understand their role in the electromagnetic-mechanical response of high-field REBCO coils.
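As a simple illustration of the Gaussian critical-current non-uniformity mentioned above, the minimal Python sketch below (not taken from the paper; the number of tape sections, the mean critical current, the 5% spread and the power-law n-value are all assumed purely for illustration) samples a longitudinal Ic profile and estimates how much the weakest sections lower the critical current of a series-connected winding under the usual electric-field criterion.
import numpy as np

# Illustrative sketch only: sample a longitudinally inhomogeneous critical
# current Ic(x) from a Gaussian distribution and estimate the coil-level
# critical current with the power-law E-J relation E_i = Ec * (I / Ic_i)^n.
rng = np.random.default_rng(0)
n_sections = 2000               # discretized tape sections along the winding (assumed)
ic_mean = 150.0                 # mean section critical current in A (assumed)
ic_std = 0.05 * ic_mean         # 5% Gaussian non-uniformity (assumed)
n_value = 25                    # power-law index (assumed)
ec = 1e-4                       # electric-field criterion in V/m

ic_profile = rng.normal(ic_mean, ic_std, n_sections).clip(min=1e-6)

def coil_critical_current(ic, n):
    # Operating current at which the tape-averaged electric field reaches Ec,
    # found by bisection (the average field grows monotonically with current).
    def mean_field(i_op):
        return np.mean(ec * (i_op / ic) ** n)
    lo, hi = 0.0, float(ic.max())
    for _ in range(60):
        mid = 0.5 * (lo + hi)
        lo, hi = (mid, hi) if mean_field(mid) < ec else (lo, mid)
    return 0.5 * (lo + hi)

print("homogeneous tape  :", coil_critical_current(np.full(n_sections, ic_mean), n_value))
print("inhomogeneous tape:", coil_critical_current(ic_profile, n_value))
Even this crude series model shows the qualitative effect reported in the abstract: the weakest sections, not the average, set the usable current of the coil.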
|
/**
 * Create a new buffered image with the same characteristics (color model,
 * raster type, properties...) as the specified one.
 *
 * @param width the width
 * @param height the height
 * @param image an image with the same characteristics as the one which
 * will be created.
 * @return a new BufferedImage compatible with the given image.
*/
public static BufferedImage createBufferedImage(int width, int height,
BufferedImage image) {
Hashtable<String, Object> properties = null;
String[] propertyNames = image.getPropertyNames();
if (propertyNames != null) {
properties = new Hashtable<>(propertyNames.length);
for (String propertyName : propertyNames) {
properties.put(propertyName, image.getProperty(propertyName));
}
}
return new BufferedImage(
image.getColorModel(),
image.getRaster().createCompatibleWritableRaster(width, height),
image.isAlphaPremultiplied(),
properties);
}
|
def select_starting_destinations(self, game, destinations):
self.info = game.get_player_info(self)
self.edge_claims = game.get_edge_claims()
combinations = [[0, 1], [1, 2], [0, 2], [0, 1, 2]]
possible_destination_comb = []
costs = []
for combination in combinations:
possible_destination = []
for index in combination:
possible_destination.append(destinations[index])
path, all_path = self.find_best_path(game, possible_destination)
if path is None:
costs.append(float("inf"))
possible_destination_comb.append(possible_destination)
else:
possible_destination_comb.append(possible_destination)
costs.append(path.cost - self.Ticket_Score_Multiplier * path.score)
min_index = costs.index(min(costs))
selected_destinations = possible_destination_comb[min_index]
return selected_destinations
|
// Events streams QEMU QMP Events.
// If a problem is encountered setting up the event monitor connection
// an error will be returned. Errors encountered during streaming will
// cause the returned event channel to be closed.
func (rpc *LibvirtRPCMonitor) Events() (<-chan Event, error) {
events, err := rpc.l.Events(rpc.Domain)
if err != nil {
return nil, err
}
c := make(chan Event)
go func() {
for e := range events {
qe, err := qmpEvent(&e)
if err != nil {
close(c)
break
}
c <- *qe
}
}()
return c, nil
}
|
import { DeploySlashCommand } from "./deployslash";
import { EndGameCommand } from "./endgame";
import { GlobalBlacklistCommand } from "./gblacklist";
import { KillCommand } from "./kill";
import { RandomCaseCommand } from "./randomcase";
import { StartGameCommand } from "./startgame";
import { StartPerpetualGameCommand } from "./startperpetualgame";
import { TestCommand } from "./test";
export const CommandList = [
new GlobalBlacklistCommand(),
new RandomCaseCommand(),
new TestCommand(),
new KillCommand(),
new StartGameCommand(),
new StartPerpetualGameCommand(),
new EndGameCommand(),
new DeploySlashCommand()
];
|
<reponame>KatPfeifer/LimitingReagentSim
import analysisButton from "../objects/analysisButton";
import button from "../objects/button";
import buttonOutline from "../objects/buttonOutline";
export default class ef extends Phaser.Scene{
private background: Phaser.GameObjects.Image;
private background2: Phaser.GameObjects.Image;
private pdt: Phaser.GameObjects.Image;
private spec: button;
private temp: button;
private precip: button;
private backButton: button;
private backOutline: buttonOutline;
private specOutline: buttonOutline;
private tempOutline: buttonOutline;
private precipOutline: buttonOutline;
constructor(){
super({key: 'efScene'});
}
create(){
this.background=this.add.image(200, 200, "bluebackground");
this.background.setScale(2.0);
this.background2=this.add.image(600, 200, "bluebackground");
this.background2.setScale(2.0);
this.pdt=this.add.image(200, 200, "EFPdt");
this.pdt.setScale(0.5);
this.add.text(400, 50, "Choose a method of analysis:", {fontFamily: "calibri", fill: '000000'});
this.spec=new button(this, 475, 125, "spec", 0.7);
this.spec.on('pointerdown', ()=>this.goToSpec(), this);
this.specOutline = new buttonOutline(this, 475, 125, "spec", 0.7, 0x184a01);
this.spec.on('pointerover', ()=>this.specOutline.enterHoverState(), this);
this.spec.on('pointerout', ()=>this.specOutline.exitHoverState("word"), this);
this.precip=new button(this, 475, 305, "precip", 0.7);
this.precip.on('pointerdown', ()=>this.goToPrecip(), this);
this.precipOutline = new buttonOutline(this, 475, 305, "precip", 0.7, 0x184a01);
this.precip.on('pointerover', ()=>this.precipOutline.enterHoverState(), this);
this.precip.on('pointerout', ()=>this.precipOutline.exitHoverState("word"), this);
this.temp=new button(this, 475, 215, "temp", 0.7);
this.temp.on('pointerdown', ()=>this.goToTemp(), this);
this.tempOutline = new buttonOutline(this, 475, 215, "temp", 0.7, 0x184a01);
this.temp.on('pointerover', ()=>this.tempOutline.enterHoverState(), this);
this.temp.on('pointerout', ()=>this.tempOutline.exitHoverState("word"), this);
this.backButton=new button(this, 750, 375, "backButton", 0.7);
this.backButton.on('pointerdown', ()=>this.goToMain(), this);
this.backOutline = new buttonOutline(this, 750, 375, "backButton", 0.7, 0x002607);
this.backButton.on('pointerover', ()=>this.backOutline.enterHoverState(), this);
this.backButton.on('pointerout', ()=>this.backOutline.exitHoverState("word"), this);
}
update(){
}
goToSpec(){
this.scene.start('SpecScene', ["EF", []]);
}
goToPrecip(){
this.scene.start('PrecipScene', ["EF", []]);
}
goToTemp(){
this.scene.start('TempScene', ["EF", []]);
}
goToMain(){
this.scene.start('MainScene');
}
}
|
#include "DM_Particle.hpp"
#include <iostream>
#include <cmath>
#include <functional>
#include "Numerics_Functions.hpp"
#include "Physics_Functions.hpp"
//6. DM-nucleus scattering cross sections:
//Nuclear Helm form factor
double FormFactor_N(double q,double A,bool ldm)
{
if(ldm || q==0.0) return 1.0;
else
{
double a = 0.52*fm;
double c = (1.23*pow(A,1.0/3.0)-0.6)*fm;
double s = 0.9*fm;
double rn = sqrt(c*c+7.0/3.0*pow(M_PI*a,2.0)-5*s*s);
double qr = q*rn;
return 3.0*(sin(qr)/pow(qr,3.0)-cos(qr)/pow(qr,2.0))*exp(-q*q*s*s/2.0);
}
}
//3. DM particle struct
DM_Particle::DM_Particle()
{
mass=0.0;
sigma_n=0.0;
sigma_e = 0.0;
ldm = true;
formfactor=" ";
ZorA = " ";
screening=false;
mMediator=0.0;
}
DM_Particle::DM_Particle(double mDM,double sn,double se,bool light,std::string ff,std::string za,bool scr,double mMed)
{
mass=mDM;
sigma_n=sn;
sigma_e = se;
ldm = light;
formfactor=ff;
ZorA = za;
screening=scr;
mMediator=mMed;
}
//Atomic form factor (charge screening)
double DM_Particle::FormFactor_A(double q,int Z) const
{
if(!screening) return 1.0;
else
{
double a2q2 = pow(q*Thomas_Fermi_Radius(Z),2.0);
return a2q2/(1.0+a2q2);
}
}
void DM_Particle::Set_Mass(double m)
{
mass = m;
}
void DM_Particle::Set_Sigma_n(double s)
{
sigma_n = s;
sigma_e = pow(Reduced_Mass(mass,mElectron)/Reduced_Mass(mass,mProton),2.0)*sigma_n;
}
void DM_Particle::Set_Sigma_e(double s)
{
sigma_e = s;
sigma_n = pow(Reduced_Mass(mass,mProton)/Reduced_Mass(mass,mElectron),2.0)*sigma_e;
}
//DM form factor
double DM_Particle::FormFactor(double q) const
{
//Contact interactions
if(formfactor=="Contact") return 1.0;
//General dark photon
else if(formfactor=="General") return (qRef*qRef+mMediator*mMediator)/(q*q+mMediator*mMediator);
//Long range interaction
else if (formfactor=="Long-Range") return qRef*qRef/q/q;
//Electric dipole interaction
else if (formfactor=="Electric-Dipole") return qRef/q;
//Error
else
{
std::cerr <<"Error in FormFactor(): Form factor "<<formfactor <<"not recognized."<<endl;
std::exit(EXIT_FAILURE);
}
}
//Zero momentum transfer spin-independent cross-section
double DM_Particle::sigmaSI(int Z,double A) const
{
double X=-1.0;
double mAux=-1.0;
if(ZorA=="Z")
{
X=Z;
mAux = mProton;
}
else if (ZorA=="A")
{
X=A;
mAux = mNucleon;
}
else
{
std::cerr <<"Error in sigmaSI: ZorA = " <<ZorA <<" not recognized."<<endl;
std::exit(EXIT_FAILURE);
}
return sigma_n*pow(Reduced_Mass(mass,NucleusMass(A)),2.0)/pow(Reduced_Mass(mass,mAux),2.0)*pow(X,2.0);
}
//Differential cross-sections
double DM_Particle::dSdER(double ER,int Z,double A,double vDM) const
{
double mA = A*mNucleon;
double ERmax = 2.0*pow(Reduced_Mass(mass,mA)*vDM,2.0)/mA;
double q = sqrt(2.0*mA*ER);
	if(q==0&&(formfactor=="Electric-Dipole"||formfactor=="Long-Range")&&screening) q=1e-15*keV; //Screening causes the 1/q factor to cancel
return sigmaSI(Z,A) / ERmax * pow(FormFactor_N(q,A,ldm),2.0) * pow(FormFactor(q),2.0) * pow(FormFactor_A(q,Z),2.0);
}
double DM_Particle::dSdq2(double q,int Z,double A,double vDM) const
{
double mA = A*mNucleon;
double qMax2 = 4.0*pow(Reduced_Mass(mass,mA)*vDM,2.0);
	if(q==0&&(formfactor=="Electric-Dipole"||formfactor=="Long-Range")&&screening) q=1e-15*keV; //Screening causes the 1/q factor to cancel
return sigmaSI(Z,A) / qMax2 * pow(FormFactor_N(q,A,ldm),2.0) * pow(FormFactor(q),2.0)* pow(FormFactor_A(q,Z),2.0);
}
double DM_Particle::Sigma_Tot(int Z,double A,double vDM) const
{
if(!ldm)
{
double mA = A*mNucleon;
double ERmax = 2.0*pow(Reduced_Mass(mass,mA)*vDM,2.0)/mA;
double ERmin = 0.0;
//integrate the diff cross section
auto dodER = std::bind(&DM_Particle::dSdER,this,std::placeholders::_1,Z,A,vDM);
//Numerical integration
double epsilon = Find_Epsilon(dodER,ERmin,ERmax,1e-5);
double integral =Integrate(dodER,ERmin,ERmax,epsilon);
return integral;
}
else
{
double result = sigmaSI(Z,A);
double q2max = pow(2.0*Reduced_Mass(mass,A*mNucleon)*vDM,2.0);
double a2=pow(Thomas_Fermi_Radius(Z),2.0);
//General interaction
if(formfactor == "General")
{
double m2 = pow(mMediator,2.0);
if(screening)
{
result*=(pow(a2,2.0)*pow(m2 + qRef*qRef,2.0)*(((a2*m2-1.0)*q2max*(q2max + m2*(2.0 + a2*q2max)))/((m2 + q2max)*(1.0 + a2*q2max)) + 2.0*m2*log((m2 + q2max)/(m2 + a2*m2*q2max))))/(pow(a2*m2-1.0,3.0)*q2max);
}
else
{
result*= pow(qRef*qRef+m2,2.0)/m2/(m2+q2max);
}
}
//Contact interaction
else if(formfactor == "Contact" && screening)
{
result *=(1.0+1.0/(1+a2*q2max)-2.0/a2/q2max*log1p(a2*q2max));
}
//Electric dipole interaction
else if(formfactor == "Electric-Dipole")
{
result *= pow(qRef,2.0)/q2max*(log1p(a2*q2max)-a2*q2max/(1+a2*q2max));
}
//Long range interaction
else if(formfactor== "Long-Range")
{
result *= pow(a2,2.0)*pow(qRef,4.0)/(1+a2*q2max);
}
return result;
}
}
//Find the stopping cross section
double Stopping_CrossSection(const std::string& detector,double sigma,double mDM)
{
if(detector == "Semiconductor"||detector=="SENSEI-surface"||detector=="SENSEI"||detector=="SuperCDMS"||detector=="DAMIC-M"||detector == "XENON10e"||detector == "XENON100e"||detector =="DarkSide-50")return pow(Reduced_Mass(mDM,mProton)/Reduced_Mass(mDM,mElectron),2.0)*sigma;
else return sigma;
}
//7. DM speed distribution
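//Speed distribution of the standard halo model in the Earth's rest frame: a Maxwell-Boltzmann distribution truncated at the galactic escape velocity vesc and normalized by Nesc.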
double SpeedDistribution(double v,double vEarth)
{
return M_PI*v*v0*v0/Nesc/vEarth*(2*exp(-(v*v+vEarth*vEarth)/v0/v0)*sinh(2*v*vEarth/v0/v0)+(exp(-pow(v+vEarth,2.0)/v0/v0)-exp(-vesc*vesc/v0/v0))*StepFunction(abs(v+vEarth)-vesc)-(exp(-pow(v-vEarth,2.0)/v0/v0)-exp(-vesc*vesc/v0/v0))*StepFunction(abs(v-vEarth)-vesc) );
}
double Average_Speed(double vEarth,double vMin)
{
//1. integrand
std::function<double(double)> integrand = [vEarth] (double v)
{
return v*SpeedDistribution(v,vEarth);
};
//2. integrate
double vMean = Integrate(integrand,vMin,(vesc+vEarth),1e-6);
//3. Renormalize
if(vMin>0.0)
{
std::function<double(double)> integrand2 = [vEarth] (double v)
{
return SpeedDistribution(v,vEarth);
};
double norm=Integrate(integrand2,vMin,vesc+vEarth,1e-6);
vMean/=norm;
}
return vMean;
}
|
<filename>crates/eldiro-cli/src/main.rs
use rustyline::Editor;
fn main() {
let mut rl = Editor::<()>::new();
let history_file = ".eldiro_history";
if rl.load_history(history_file).is_err() {
println!("No previous history.");
}
let mut env = eldiro::Env::default();
loop {
let readline = rl.readline("→ ");
match readline {
Ok(line) => {
let line = line.as_str().trim();
rl.add_history_entry(line);
match eval(line, &mut env) {
Ok(Some(val)) => println!("{}", val),
Ok(None) => {}
Err(msg) => println!("{}", msg),
}
}
Err(err) => {
println!("{:?}", err);
break;
}
}
}
rl.save_history(history_file).unwrap();
}
fn eval(input: &str, env: &mut eldiro::Env) -> Result<Option<eldiro::Val>, String> {
let parse = eldiro::parse(input).map_err(|msg| format!("Parse error: {}", msg))?;
let evaluated = parse
.eval(env)
.map_err(|msg| format!("Evaluation error: {}", msg))?;
if evaluated == eldiro::Val::Unit {
Ok(None)
} else {
Ok(Some(evaluated))
}
}
|
<reponame>t-yanaka/zabbix-report
# coding: utf-8
from rest_framework import serializers
from .models import User, Entry
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('name', 'mail')
class EntrySerializer(serializers.ModelSerializer):
class Meta:
model = Entry
fields = ('title', 'body', 'created_at', 'status', 'author')
|
<filename>src/presentation/protocols/http-request.ts
export interface HttpRequest<B = any, P = any> {
body?: B
headers?: any
params?: P
query?: any
}
|
<reponame>pipauwel/IFCtoLBD<filename>src/main/java/nl/tue/isbe/BOT/Interface.java
package nl.tue.isbe.BOT;
/*
*
* Copyright 2019 <NAME>, Eindhoven University of Technology
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import com.buildingsmart.tech.ifcowl.vo.IFCVO;
import nl.tue.isbe.ifcspftools.Guid;
import java.util.ArrayList;
import java.util.List;
public class Interface {
private Guid guid = new Guid();
private String name;
private long lineNum;
private Space relatingSpace;
private Element relatedElement;
private IFCVO lineEntry;
public static List<Interface> interfaceList = new ArrayList<Interface>();
public Interface(IFCVO lineEntry, Guid guid){
this.lineEntry = lineEntry;
lineNum = lineEntry.getLineNum();
name = "interface_"+lineEntry.getLineNum();
interfaceList.add(this);
this.guid = guid;
}
//------------
// ACCESSORS
//------------
public Guid getGuid() {
return guid;
}
public long getLineNum() {
return lineNum;
}
public String getName() {
return name;
}
/*This should only be run once, from the constructor. It creates the correct name, based on the IFC input, which is not stored*/
    public IFCVO getLineEntry() {
        return lineEntry;
    }
private void setConnectionGeometry(){
/*#154= IFCCARTESIANPOINT((-13084.0719280682,-9080.72887478923,0.));
#156= IFCCARTESIANPOINT((8800.,0.));
#158= IFCCARTESIANPOINT((8800.,10300.));
#160= IFCCARTESIANPOINT((5800.,10300.));
#162= IFCCARTESIANPOINT((-0.,10300.));
#164= IFCPOLYLINE((#9,#156,#158,#160,#162,#9));
#166= IFCAXIS2PLACEMENT3D(#154,#21,#15);
#167= IFCPLANE(#166);
#168= IFCCURVEBOUNDEDPLANE(#167,#164,());
#170= IFCCONNECTIONSURFACEGEOMETRY(#168,$);*/
}
public Space getRelatingSpace() {
return relatingSpace;
}
public void setRelatingSpace(Space relatingSpace) {
this.relatingSpace = relatingSpace;
}
public Element getRelatedElement() {
return relatedElement;
}
public void setRelatedElement(Element relatedElement) {
this.relatedElement = relatedElement;
}
}
|
The night started off brisk and swift, a hunch of the events to come. Nothing troublesome, except a sense of urgency in the way. I had been reading books all day long, taking time to immerse myself in the ageless wonders some of the stories produce, all the while moving to a spot more conducive to the task at hand.
I was going to take a test that was going to determine whether I passed a class or not, and I needed a last batch of studies to get my confidence and stamina going in the positive direction. I had taken things real slow over the past couple of days, hoping to instead focus on intellectual readings to crowd my mind and ease some of the responsibilities out of the way. This in turn led me to face the dilemma of how in the world I was going to study, at the last minute, for an important test I needed to pass?
The pondering thought had a simple solution, with dire consequences if not administered in the correct fashion. A stimulant that would give me laser-sharp focus and long-lasting memory retention, and above all else, a powerful kick to keep me up, all within a moment’s time. I am talking, of course, about Adderall. Very powerful stimulant indeed. I was beginning to take interest in this drug and all the possibilities it might entail. Parties, studies, thinking, sex, reading… Man, it was wonderful.
I took it once, three days before, to do a report on bankruptcies for my Law class, while also taking the time to study for my final test. The effects were immediate and invigorating. Everything I did was done with tenacious determination and concentration. In two hours I managed to finish all my responsibilities and had enough confidence and time to leave the rest for another time. The rest of the night I used it to read and think, going to sleep at the comfortable hour of 2am.
The next time I took the Adderall, this time to finish what I started in my first binge, resulted in one of the craziest days of my entire life.
Everything was all set for Thursday night. The air was calm and no one was in my vicinity with the intent to disturb my peace. I had enjoyed the early parts of the day the same way I had done for well over two weeks now, in the fine comfort of books of all kinds. There were still more than two hours to kill before the tentative hour of consumption at exactly 1am, so I spent the rest of the time going through YouTube and Google, trying to distance myself from everything. It was only a matter of time before my plan was to be executed to perfection and my neurons left to completely fuck over each other in the waning moments of the night.
***
The Adderall didn’t take effect until approximately two full hours after its original consumption. By then, I had taken another 15 milligrams to serve as an extension of my original dose and give me all the energy I needed to survive for almost 8 hours in these conditions. I wanted to test the limits of what my mind, body, and soul could support.
The first time I had taken the drug my body welcomed it with open arms. The second time it was even more receptive. My brain was in first place of the Indy 500 trying to break the sound barrier. It wasn’t listening to the outside world and did whatever it wanted to do for well over three hours. I studied, I read, and I most certainly spent a heavy amount of time staring at my window for well over an hour thinking about whatever the fuck came to mind.
Nothing in this world could explain the power of this drug. It lurked into the very fabric of the system it encounters, creating euphoria and invigoration. Either the user feels like they are the smartest human being in the world, or they feel like they could tackle all of life’s problems. It was around these later effects that I started to take my thoughts on a more romantic level, and once I did, nothing in this whole wide world could take me away from these emotions. There’s only been one person in my life that has taken my heart prisoner for such a long time, and till this very day I still haven’t found the courage to go talk to this person. The drug made me question my idea of love, and somehow made it something bigger than me. It motivated me to fix all my problems with her so we can be together.
This lingered on until well past 4am, when there were only a mere 3 hours left before my big test. I knew this wasn’t doing me any good if I kept prancing around this philosophical attempt to enclose my love for her into some deeper meaning, but I couldn’t stop myself. My mind was a machine unable to drive away from its instructions, and right now its instructions were to completely ignore the troubles at hand. It was useless to resist.
Despite the emotional dilemma I found myself in for the time being, I was nevertheless having a really good time. I was happy, motivated by the array of thoughts, and calm with myself knowing this could be the way things went for three more hours. I was confident in my knowledge of the subject, and I was enjoying this prancing around. Little did I know that when the clock struck 4:40am the entire day would be ruined.
Something went wrong. The Adderall was receding. I could feel it. My brain wasn’t working at high motor speeds, the sense of dread a normal human starts to feel when they have to face everyday life was beginning to take effect, and things started to feel a little less ordinary and productive.
I panicked. Not because I was no longer happy and as concentrated as I was before, but because taking the test without the Adderall taking effect was something I was not planning on. Things needed to be in place. I needed to be a little crazy inside the head, to remember the notes I could not have sung on other tones.
Disaster was looming. An F was approaching. I couldn’t take the pain of failing this class. No, not this one. Any other one could’ve been fine; I would deal with it on my own terms. But this one? No chance in hell I was ever going to overcome a D in this class.
In the midst of this delusional overthinking, I made the choice a scared man with nothing to lose would make and grabbed another dose of Adderall. Fifteen milligrams now, 15 more two hours later. I didn’t want to risk taking the test at half strength, so I wanted to reproduce the same effects I had been feeling all night long, and I thought the only way this could be done was by repeating the same pattern as before.
By 5:30, the effects were back on, but the recent events made me more preoccupied with its ability to last than to actually enjoy the effects of the drug. I was studying with fervor, distancing my other thoughts and only coming to grips with them for some mere moments of entertainment as I was trying to calm down, but the damage was already done. I wasn’t to feel the same way again for as long as the drug lasted.
I spent the rest of the night trapped as a prisoner in the mind of the relics I had created by going to excess with something I didn’t fully understand. I had overdone an experiment that was very risky to begin with, and it could’ve landed me in some deep trouble if I had kept asking for more. From then on everything I did looked very different from the portrayal of the ambition of a once peaceful effect. I did wonders, but horrors when faced with uncertainty. A clear metaphor for the user of this drug, and how to deal with it.
***
It was 6am. Time to leave. First I needed to stop and get some gas. I had work that same day and I wanted to be fully prepared for the long trips my job required me to make with my own car. I was afraid of driving under these circumstances, unhappy and under so much pressure. There was no one to take me on time, so it was futile to ponder the thought further. The only logical option was to suck it up and control my mind as best as I could.
It took me less than 10 minutes to fill up the tank, so I was well on my way to meet my probable impending doom. I arrived at the university at 6:25am, plenty of time for one last batch of studies. There were already 60 milligrams of Adderall in my system. My mind felt the kick, but the disaster that unfolded two hours earlier had completely ruined my state of mind. From then on I would no longer feel like an intellectual. Instead, I would feel like a worthless bum trapped in his own misfortunes.
I went to sit on a corner where no one was around save for humble folks minding their own business, probably on their way to take another Finals test like me. Despite all the troubles I faced, I was still confident in my knowledge of the subject. It wasn’t really all that much information, and it was all just a deterioration of logic. For 25 minutes I looked into my notes, still feeling the kick helping me out. One of the side effects of taking the drug aside from some mood swings is the constant coldness one could feel depending on how much he concentrates in the temperature of his surroundings. That, and a knack for headaches.
It was 7am and the professor was nowhere in sight. He was never late, for anything. Probably still picking the exams up, or lost in the headlines of the newspaper he always brought when we were taking tests. Ten minutes passed and still nothing. I was beginning to get distraught. I checked the date of the test and it matched this exact time, so no error of judgment there.
Tired of waiting, I got up from my comfortable thoughts and went inside the building. A simple one-story building with two corridors running in opposite directions and no fewer than 15 classrooms. It felt more like a high school than a prestigious college. The classroom where the test was taking place was room 420. The door was open, and to my sudden shock, my classmates, who I gave zero fucks about, were already in there taking the test, with a different professor, whom I had seen earlier making his way to the building. Logic dictated that my professor probably couldn’t make it on time, so he had asked this other one to substitute for him.
I opened the door, grabbed the test, put my notebook below my seat, and sat down to start taking the test in peace. After all the bullshit going around my own forsaken thoughts, I made it in one piece.
I was, however, hit with a bombshell of a revelation on the very first page of the test. Seven mathematical problems I had no idea how to solve. What the fuck is this shit? You can’t be serious? How could I have missed that this was going to be on the test? Fuck. Fuck, fuck, fuck. Eight fucking hours in a mental condition that invites learning shit quickly and precisely and I didn’t find the time to learn that. I couldn’t believe my luck.
I looked over the rest of the test, and it seemed manageable. It was all answerable and easy. I did this part first, hoping to calm my nerves and get my mind veered into a positive mode of thinking so I could do the test as fast as possible. I spent more than 30 minutes trying to figure out what the hell I was supposed to do with those problems. Nothing, blank as a pale dead man. It was useless to try to conjure up the answers, so I took the next logical step and answered them with random notes.
The test was finished. I got up from my seat and gave the test back, leaving that god forsaken place for at least two months.
On my way home I was doing some deep thinking, as the Adderall was still in my system but receding at a rapid pace. There was a strong probability that I might not pass that test, and I was beginning to think about my future and what the fuck I was going to do for the rest of my life. Finance wasn’t what I thought it would be. The only class I actually did enjoy and went out of my way to study for was, ironically, economics, but here I was failing that very same class. The only passionate interest that was beginning to take hold of me was writing, about anything, when I wanted and how I wanted. Taking some courses in literature and journalism was the obvious next step, but for that I needed to make even more changes. It was becoming quite certain that this would be the path I would take in life. I would give little effort to some classes related to finance, and would instead focus much of my attention on reading and writing, hopefully to one day be really good at it and make a satisfying living from it.
All of these scenarios were thoroughly analyzed on my way home. By the time I got to the gate, I realized I had forgotten my notebook. Shit, I really liked that notebook. I got inside my house, sat down for a while, and began to ponder even more. The saddest part of all this was that the commotion was far from over. I had to be awake for at least 15 more hours, and I had run out of Adderall. What would ensue would be complete and total chaos.
***
Around 9am, a friend came to my house because he needed help studying for a test he had at 1pm. I had told him, around 2am when I was at my Adderall peak and thought the effects were going to last for well over 15 hours, that I would help him, so I couldn’t say no simply because of the condition I was now in. He needed my help, and I always help out my friends regardless of my state.
He spent more than 3 hours getting ready himself. He took 15 milligrams of Adderall that I had given him two days before and he was off to the races. I, however, was inert and immovable. I was a fucking vegetable, unable to feel anything or give a shit about anything. I was beginning to feel tired.
The Adderall was still in my system, but now it was doing the complete opposite of making me concentrate. My estimates are that I probably overdosed. My friend told me I was behaving like I was incredibly high on pot and my surroundings were a detriment to who I was. I stared blankly on the floor for hours, and even had a hard time contemplating even the idea of eating a meal. An abominable mess with no way to change for about three more hours, and even when the effects wear off I was going to be tired as fuck.
The deadline was approaching for my friend to go, until I had the dumbest idea that could’ve possessed my brain under these conditions. I offered to take him to his test in my car. Seriously? After all I’d been through? It was at this moment that I realized that I was in fact as crazy as I thought I was, and he was probably crazier because he actually said yes. I mean, I did want to get my notebook back, but to go to the lengths of taking him there myself? Stupid.
There was nothing I could do now. I had already told him yes and he was already packing his things into my car. The entire day had been full of bad decisions, so it was kind of poetic that I was about to make another one. I told him to talk the entire time about interesting things we always liked to discuss so that I wouldn’t lose myself in sleep, and luckily he did just that the entire way. Thanks to that we didn’t face any problems on either trip.
We reached the university by 1:30pm, 30 minutes late but still fine. He went to take his test and I went to the library to meet up with some friends and wait for him, after I realized the place where I had left my notebook was locked. I waited two fucking hours for my friend to finish a one-hour test because the motherfucker got low blood pressure in the middle of it and needed 20 minutes to cool off before continuing.
I spent one hour in the library and 45 minutes strolling around the university and all its little buildings. He got out of his test around 3:30pm, by which point I was already on the verge of a mental breakdown. We got out of there as quickly as possible and returned home by 3:50pm so he could go off to run some errands. As for me, I still needed to be awake for 7 more hours because I had a shift at work at 7pm. Fantastic stuff.
I spent the rest of the day being consumed by books and reading, all to the tune of the loudest music any human can withstand, for a period of 3 hours before taking off for work. I finished my shift without any accidents or any mistakes in terms of orders, so you could say I did pretty well considering the circumstances of being illiterate, immovable, in emotional distress, and tired, with a big headache going around the side.
I got home at about 11pm and went straight to bed in one piece, with plenty of decisions to make about my future but all the time in the world to think them through, thus concluding one of the most fucked up days one could ever live.
|
Contact dermatitis caused by urethane acrylates in a hearing aid
Several articles have appeared in the medical literature presenting the arithmetic mean as a summary index for the evaluation of ordinal data such as patch test responses. The use of the mean in such situations is inappropriate. Consider a hypothetical study to compare the sensitizing potential of 2 components (chemical A and chemical B) in a mixture X. The grading scale of the patch test response may take the following form, with the assignment of an arbitrary value of 0 for no response: 0.5 for a ?+ reaction, 1 for a + reaction, 2 for a ++ reaction and 3 for a +++ reaction. 10 subjects sensitized to the mixture X who were patch tested with chemical A all had a ?+ reaction, while 2 out of the 10 subjects patch tested with chemical B had a ++ response. Mean responses of 0.5 for chemical A and 0.4 for chemical B are obtained. Does this imply that chemical B, with the lower mean score, has a lower "sensitizing capacity" as compared to chemical A? Another component of the mixture, chemical C, was subsequently tested and produced a mean response score of 1.7. What does this value of 1.7 represent? Gardner & Altman (1) have stated that "for ordinal ordered qualitative data, the calculation of means and standard deviations is incorrect; instead, proportions should be reported." The use of means and standard deviations as summary indices should be confined solely to interval or ratio data.
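To make the arithmetic concrete, the following minimal Python sketch (illustrative only, using the hypothetical numbers above and scoring the doubtful ?+ reaction as 0.5) shows how the mean ranks chemical B below chemical A even though only B produced definite positive reactions, whereas the proportions of positive reactions do not mislead in this way.
# Hypothetical patch-test scores from the example above (not real data).
scores_A = [0.5] * 10          # ten doubtful (?+) reactions to chemical A
scores_B = [2, 2] + [0] * 8    # two ++ reactions and eight negatives to chemical B
mean_A = sum(scores_A) / len(scores_A)   # 0.5
mean_B = sum(scores_B) / len(scores_B)   # 0.4 -> "lower" mean despite stronger reactions
# Proportion of definite positive (>= +) reactions, as recommended for ordinal data:
prop_A = sum(s >= 1 for s in scores_A) / len(scores_A)   # 0.0
prop_B = sum(s >= 1 for s in scores_B) / len(scores_B)   # 0.2
print(mean_A, mean_B, prop_A, prop_B)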
|
from __future__ import annotations
import tempfile
import os
import json
import logging
from typing import Optional
from transformers import Wav2Vec2Processor, AutoConfig, AutoTokenizer, AutoFeatureExtractor
class TokenSet():
"""
TokenSet
Parameters
----------
tokens : list[str]
List of tokens.
blank_token : Optional[str] = "<pad>"
Blank token
silence_token : Optional[str] = "|"
Silence token
unk_token : Optional[str] = "<unk>"
Unk token
bos_token : Optional[str] = "<s>"
BOS token
eos_token : Optional[str] = "</s>"
EOS token
letter_case: str
Case mode to be applied to the transcription, can be 'lowercase', 'uppercase'
or None (None == keep the original letter case). Default is lowercase.
"""
def __init__(self, tokens: list[str], blank_token: Optional[str] = "<pad>", silence_token: Optional[str] = "|", unk_token: Optional[str] = "<unk>",
bos_token: Optional[str] = "<s>", eos_token: Optional[str] = "</s>", letter_case: str = "lowercase"):
self.tokens = tokens
self.blank_token = blank_token
self.silence_token = silence_token
self.unk_token = unk_token
self.bos_token = bos_token
self.eos_token = eos_token
self.letter_case = letter_case
if self.letter_case == "lowercase":
self.tokens = [token.lower() if token not in self.special_tokens else token for token in self.tokens]
elif self.letter_case == "uppercase":
self.tokens = [token.upper() if token not in self.special_tokens else token for token in self.tokens]
if blank_token not in tokens:
logging.warning(f"blank_token {blank_token} not in provided tokens. It will be added to the list of tokens")
self.tokens.append(blank_token)
if silence_token not in tokens:
logging.warning(f"silence_token {silence_token} not in provided tokens. It will be added to the list of tokens")
self.tokens.append(silence_token)
if unk_token not in tokens:
logging.warning(f"unk_token {unk_token} not in provided tokens. It will be added to the list of tokens")
self.tokens.append(unk_token)
if bos_token not in tokens:
logging.warning(f"bos_token {bos_token} not in provided tokens. It will be added to the list of tokens")
self.tokens.append(bos_token)
if eos_token not in tokens:
logging.warning(f"eos_token {eos_token} not in provided tokens. It will be added to the list of tokens")
self.tokens.append(eos_token)
self.id_by_token = {token: i for i, token in enumerate(self.tokens)}
self.token_by_id = {i: token for i, token in enumerate(self.tokens)}
@property
def blank_token_id(self):
return self.id_by_token[self.blank_token]
@property
def silence_token_id(self):
return self.id_by_token[self.silence_token]
@property
def unk_token_id(self):
return self.id_by_token[self.unk_token]
@property
def bos_token_id(self):
return self.id_by_token[self.bos_token]
@property
def eos_token_id(self):
return self.id_by_token[self.eos_token]
@property
def non_special_tokens(self):
return [token for token in self.tokens if token not in self.special_tokens]
@property
def special_tokens(self):
return [self.blank_token, self.silence_token, self.unk_token, self.bos_token, self.eos_token]
@property
def size(self):
return len(self.tokens)
def to_processor(self, model_name_or_path: str="facebook/wav2vec2-large-xlsr-53"):
tokens_dict = {v: i for i, v in enumerate(self.tokens)}
with tempfile.TemporaryDirectory() as tmpdirname:
vocab_path = os.path.join(tmpdirname, "vocab.json")
with open(vocab_path, "w") as vocab_file:
json.dump(tokens_dict, vocab_file)
config = AutoConfig.from_pretrained(model_name_or_path)
config_for_tokenizer = config if config.tokenizer_class is not None else None
tokenizer_type = config.model_type if config.tokenizer_class is None else None
tokenizer = AutoTokenizer.from_pretrained(
tmpdirname,
config=config_for_tokenizer,
tokenizer_type=tokenizer_type,
bos_token=self.bos_token,
eos_token=self.eos_token,
unk_token=self.unk_token,
pad_token=self.blank_token,
word_delimiter_token=self.silence_token,
do_lower_case=False,
# do_lower_case=self.letter_case == "lowercase",
#TODO: fix transformers/models/wav2vec2/tokenization_wav2vec2.py:199
)
feature_extractor = AutoFeatureExtractor.from_pretrained(model_name_or_path)
return Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
@classmethod
def from_processor(cls, processor: Wav2Vec2Processor, letter_case: str = "lowercase"):
blank_token = processor.tokenizer.pad_token
silence_token = processor.tokenizer.word_delimiter_token
unk_token = processor.tokenizer.unk_token
bos_token = processor.tokenizer.bos_token
eos_token = processor.tokenizer.eos_token
tokens = [x for x in processor.tokenizer.convert_ids_to_tokens(range(0, processor.tokenizer.vocab_size))]
return cls(tokens, blank_token, silence_token, unk_token, bos_token, eos_token, letter_case)
def save(self, path: str):
with open(path, "w", encoding="utf-8") as f:
json.dump(self.__dict__, f, indent=2, ensure_ascii=False)
@classmethod
def load(cls, path: str):
with open(path, encoding="utf-8") as f:
o = json.load(f)
return cls(o["tokens"], o["blank_token"], o["silence_token"], o["unk_token"], o["bos_token"], o["eos_token"], o["letter_case"])
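# Hedged usage sketch (hypothetical token list, not part of the original module):
# build a small TokenSet and inspect the ids it assigns to its tokens.
if __name__ == "__main__":
    token_set = TokenSet(tokens=["a", "b", "c", "|", "<pad>", "<unk>", "<s>", "</s>"])
    print(token_set.size)                # 8
    print(token_set.blank_token_id)      # index of "<pad>" in the token list
    print(token_set.non_special_tokens)  # ['a', 'b', 'c']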
|
// end anon namespace
class vtkTextPropertyLookup
: public std::map<unsigned long, vtkSmartPointer<vtkTextProperty> >
{
public:
bool contains(const unsigned long id) {return this->find(id) != this->end();}
};
|
from typing import Callable, Sequence
import torch
class TokenEmbeddingPooling:
    """Method for pooling a sequence of token representations into a single representation."""
def __init__(
self,
reduction: Callable[[torch.FloatTensor, int], torch.FloatTensor],
):
"""
Initialize the module.
:param reduction:
The reduction method.
"""
self.reduction = reduction
def pool(
self,
token_embeddings: Sequence[torch.FloatTensor],
) -> torch.FloatTensor:
"""
Pool token embeddings into a single vector.
:param token_embeddings:
The individual token embeddings, each of shape: (dim,)
:return: shape: (dim,)
A single pooled token sequence representations.
"""
result = self.reduction(torch.stack(tensors=token_embeddings, dim=0), 0)
if isinstance(result, tuple):
result = result[0]
return result
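# Hedged usage sketch (hypothetical tensors, not part of the original module):
# mean-pool three token embeddings of dimension 4 into a single vector.
if __name__ == "__main__":
    pooling = TokenEmbeddingPooling(reduction=torch.mean)
    token_embeddings = [torch.randn(4) for _ in range(3)]
    pooled = pooling.pool(token_embeddings)
    print(pooled.shape)  # torch.Size([4])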
|
#import <UIKit/UIKit.h>
@class TGCircularProgressView;
@interface TGSecretPeerMediaTimerView : UIView
@property (nonatomic, strong, readonly) UIImageView *infoBackgroundView;
@property (nonatomic, strong, readonly) UIImageView *timerFrameView;
@property (nonatomic, strong, readonly) TGCircularProgressView *progressView;
@property (nonatomic, strong, readonly) UILabel *progressLabel;
@end
|
Automatic parallelization of Simulink applications
The parallelization of Simulink applications is currently a responsibility of the system designer and the superscalar execution of the processors. State-of-the-art Simulink compilers excel at producing reliable and production-quality embedded code, but fail to exploit the natural concurrency available in the programs and to effectively use modern multi-core architectures. The reason may be that many Simulink applications are replete with loop-carried dependencies that inhibit most parallel computing techniques and compiler transformations.
In this paper, we introduce the concept of strands that allow the data dependencies to be broken while preserving the original semantics of the Simulink program. Our fully automatic compiler transformations create a concurrent representation of the program, and thread-level parallelism for multi-core systems is planned and orchestrated. To improve single processor performance, we also exploit fine grain (equation-level) parallelism by level-order scheduling inside each thread. Our strand transformation has been implemented as an automatic transformation in a proprietary compiler and with a realistic aeronautic model executed in two processors leads to an up to 1.98 times speedup over uniprocessor execution, while the existing manual parallelization method achieves a 1.75 times speedup.
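As a rough illustration of the concurrency being exploited (a generic sketch, not the paper's strand transformation): when each subsystem's output depends only on the current input and its own unit-delay state from the previous simulation step, the delayed state effectively breaks the loop-carried dependency, so the subsystems can be evaluated in parallel within each step.
# Generic Python sketch (hypothetical parameters, not the paper's algorithm): two
# Simulink-style subsystems, each with unit-delay feedback y_i[k] = a_i*y_i[k-1] + b_i*u[k].
# Each output depends only on the previous state and the shared input, so the two
# subsystems can be evaluated concurrently within a simulation step.
from concurrent.futures import ThreadPoolExecutor
def subsystem(a, b, state, u):
    y = a * state + b * u  # compute output from the delayed (previous) state
    return y, y            # the unit delay stores the current output as the new state
def parallel_step(states, u, params):
    with ThreadPoolExecutor(max_workers=len(states)) as pool:
        futures = [pool.submit(subsystem, a, b, s, u) for (a, b), s in zip(params, states)]
        results = [f.result() for f in futures]
    return [y for y, _ in results], [s for _, s in results]
params = [(0.9, 0.1), (0.5, 0.5)]  # hypothetical block parameters
states = [0.0, 0.0]
for k, u in enumerate([1.0, 1.0, 1.0]):
    outputs, states = parallel_step(states, u, params)
    print(k, outputs)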
|
import time
import logging
LOGGER = logging.getLogger()
def driveIter(root, drive, mimeType):
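    """Yield the direct children of ``root``: sub-folders, files whose mimeType matches
    ``mimeType``, and shortcuts resolved to their targets, paging through files().list."""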
params = {
"pageToken": None,
"supportsAllDrives": True,
"includeItemsFromAllDrives": True,
"fields": "files(id,name,mimeType,parents,shortcutDetails), incompleteSearch, nextPageToken",
"q": "'%s' in parents and trashed = false and (mimeType = 'application/vnd.google-apps.folder' or mimeType = 'application/vnd.google-apps.shortcut' or mimeType contains '%s')"
% (root["id"], mimeType),
"orderBy": "name",
}
while True:
try:
response = drive.files().list(**params).execute()
except Exception as e:
response = {"files": []}
LOGGER.error(
"\033[31mERROR RETRIEVING FILE '%s'!\033[0m" % (root["id"]),
)
LOGGER.error(str(e))
for file in response["files"]:
if mimeType in file["mimeType"]:
file["type"] = "file"
yield file
elif file["mimeType"] == "application/vnd.google-apps.folder":
file["type"] = "directory"
yield file
elif file["mimeType"] == "application/vnd.google-apps.shortcut":
tmp_file = {
"id": file["shortcutDetails"]["targetId"],
"name": file["name"],
"mimeType": file["shortcutDetails"]["targetMimeType"],
"parents": file["parents"],
}
if tmp_file["mimeType"] == "application/vnd.google-apps.folder":
tmp_file["type"] = "directory"
file = tmp_file
yield file
elif mimeType in tmp_file["mimeType"]:
tmp_file["type"] = "file"
file = tmp_file
yield file
try:
params["pageToken"] = response["nextPageToken"]
except KeyError:
return
def driveWalk(root, drive, walk, mimeType):
if root.get("mimeType") == "application/vnd.google-apps.folder":
for item in driveIter(root, drive, mimeType):
driveWalk(item, drive, walk, mimeType)
elif mimeType in root.get("mimeType"):
walk["children"].append(root)
else:
return
return walk
def driveTree(root, drive, mimeType):
if root.get("mimeType") == "application/vnd.google-apps.folder":
tree = root
tree["children"] = [
driveTree(item, drive, mimeType)
for item in driveIter(root, drive, mimeType)
]
elif mimeType in root.get("mimeType"):
tree = root
else:
return
return tree
|
Believe it or not, LaMarcus Aldridge is settled in San Antonio. (Eric Gay/Associated Press)
Since LaMarcus Aldridge and the San Antonio Spurs agreed to join their houses in the summer of 2015, the marriage has been on the rocks.
Rumblings persisted for over a year that neither side was thrilled with the arrangement, and the Spurs openly tried to ship Aldridge elsewhere this summer.
That’s what made last week’s news that Aldridge and the Spurs had agreed to a contract extension that keeps him in San Antonio for the next two years surprising to many around the league. If both sides wanted to move on for so long, why did they agree to stay together?
But as Aldridge has gotten off to a torrid start to this season, lifting the Spurs to three straight wins even as Kawhi Leonard remains sidelined, the five-time all-star believes the reason is simple: He’s back to being himself.
“I’m more confident,” Aldridge said in an interview after San Antonio’s win in Chicago Saturday. “I’m myself. I feel like I’m back to who I was before I came here.
“I feel like I’m getting different looks and I feel like I’m in the flow and I’m involved so I’m always feeling like I can make a read and take my shots. I feel like at first I was trying to fit in and trying what [Spurs Coach Gregg Popovich] wanted me to be so bad that I wasn’t being myself.”
Much has been made of the sit down between Aldridge and Popovich this summer, a much-needed clearing of the air after an uncomfortable first two years in San Antonio. Since then, Popovich has said that he regrets trying to change Aldridge’s game after the native Texan arrived in San Antonio as a free agent, blaming himself for Aldridge feeling ostracized since becoming a Spur.
But whether the good vibes on both sides are truth, lip service or somewhere in between, the results on the court speak volumes. Aldridge has looked terrific, carrying San Antonio offensively and allowing the Spurs to remain the same old Spurs even as their No. 1 option sits out.
In doing so, however, one thing has significantly changed: Aldridge’s shot attempts. After averaging fewer than 15 per game each of the past two seasons — something that hadn’t happened since his rookie season 10 years ago — Aldridge is averaging more than 20 shots per game through San Antonio’s first three contests.
It’s no surprise that has coincided with Aldridge’s renewed confidence level.
“He’s been sharp and in rhythm,” Spurs center Pau Gasol said. “It’s kind of what he’s used to.
“It’s the way he’s played his entire career, knowing he’s going to get 20-plus shots. That’s always comforting.”
This is what many — likely including Aldridge — expected when he arrived in San Antonio two years ago. Up until that point, Leonard had been a nice complementary piece offensively, steadily increasing his output but never operating as anything like the first option in an NBA offense.
The expectation was that that would continue — particularly given that Aldridge had already established himself as one of the premier scoring bigs in the league during his first nine seasons in Portland, where he’d averaged more than 20 points per game five consecutive seasons. Pairing him with a defensive-minded forward who has never seemed interested in having the spotlight on him felt like a perfect fit.
But Leonard surprised the team by graduating to its No. 1 choice on offense two years ago, then morphing into an MVP candidate last season. In both seasons, his shot attempts surpassed those of Aldridge.
So, with a revived Aldridge, what will happen when Leonard, who remains out indefinitely with tendinopathy in his right quad, eventually returns?
“I don’t know,” Aldridge admitted. “Right now it’s how it’s going, so we’ll see. But I would imagine that some of the things we’re doing will continue, [but] of course it’s going to tweak, because he’s our main guy.”
Leonard doesn’t seem to be on the verge of coming back soon. When asked about his star’s status Saturday, Popovich said, “I think it’s going to be weeks, more than months. So that’s all I know right now.” Whenever Leonard does come back, he’s unlikely to immediately return to where he was the past two seasons.
Still, even if Aldridge’s shot attempts drop when Leonard comes back, the confidence Aldridge has regained appears to be here to stay. He brought up both the idea of being himself and playing with confidence multiple times during the interview, reflecting a newfound comfort level with his place in San Antonio.
Part of that undoubtedly comes from where his relationship appears to be with Popovich. Whether out of desire, necessity or a combination of both, the conversations the two men had this summer seem to have changed things.
“Just being myself,” Aldridge said. “He’s a great coach, and I was trying to listen because I’m not a bad guy. So I was listening to what he said, but I was sort of second-guessing myself and not really playing my confident game.
“Now he’s saying, ‘Go be yourself.’ Of course, I might take one shot that’s bad, but I’m not a guy that’s going to keep doing it, so I do one shot that I know was bad and I fixed it, so I think just knowing I have the freedom to be myself has been great so far.”
That newfound freedom has Aldridge playing like his former self. It also should help him continue to thrive in San Antonio.
|
import org.testng.Assert;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CopyOnWriteArraySet;
/**
* @author Ivan Khalopik
* @since 1.0
*/
public class TestCollectionUtils extends Assert {
@DataProvider
public Object[][] isEmptyData() {
return new Object[][]{
{null, true},
{new ArrayList(), true},
{CollectionUtils.newConcurrentList(), true},
{CollectionUtils.newConcurrentSet(), true},
{CollectionUtils.newList(), true},
{CollectionUtils.newSet(), true},
{CollectionUtils.newSet("aaa", "bbb"), false},
{CollectionUtils.newList("aaa"), false},
{CollectionUtils.newConcurrentList("aaa"), false},
};
}
@DataProvider
public Object[][] collectionFactoryData() {
return new Object[][]{
{
CollectionUtils.newList(),
ArrayList.class,
new Object[]{}
},
{
CollectionUtils.newList(1, 2, 3),
ArrayList.class,
new Object[]{1, 2, 3}
},
{
CollectionUtils.newList(Arrays.asList("1", "2", "3")),
ArrayList.class,
new Object[]{"1", "2", "3"}
},
{
CollectionUtils.newConcurrentList(),
CopyOnWriteArrayList.class,
new Object[]{}
},
{
CollectionUtils.newConcurrentList(1, 2, 3),
CopyOnWriteArrayList.class,
new Object[]{1, 2, 3}
},
{
CollectionUtils.newConcurrentList(Arrays.asList("1", "2", "3")),
CopyOnWriteArrayList.class,
new Object[]{"1", "2", "3"}
},
{
CollectionUtils.newSet(),
HashSet.class,
new Object[]{}
},
{
CollectionUtils.newSet(1, 2, 3),
HashSet.class,
new Object[]{1, 2, 3}
},
{
CollectionUtils.newSet(Arrays.asList("1", "2", "3")),
HashSet.class,
new Object[]{"1", "2", "3"}
},
{
CollectionUtils.newConcurrentSet(),
CopyOnWriteArraySet.class,
new Object[]{}
},
{
CollectionUtils.newConcurrentSet(1, 2, 3),
CopyOnWriteArraySet.class,
new Object[]{1, 2, 3}
},
{
CollectionUtils.newConcurrentSet(Arrays.asList("1", "2", "3")),
CopyOnWriteArraySet.class,
new Object[]{"1", "2", "3"}
},
};
}
@DataProvider
public Object[][] mapFactoryData() {
final Map<String, Integer> testMap = new HashMap<String, Integer>();
testMap.put("1", 1);
testMap.put("2", 2);
testMap.put("3", 3);
return new Object[][]{
{CollectionUtils.newMap(), HashMap.class, Collections.emptyMap()},
{CollectionUtils.newMap("1", 1, "2", 2, "3", 3), HashMap.class, testMap},
{CollectionUtils.newMap(testMap), HashMap.class, testMap},
{CollectionUtils.newConcurrentMap(), ConcurrentHashMap.class, Collections.emptyMap()},
{CollectionUtils.newConcurrentMap("1", 1, "2", 2, "3", 3), ConcurrentHashMap.class, testMap},
{CollectionUtils.newConcurrentMap(testMap), ConcurrentHashMap.class, testMap},
};
}
@DataProvider
public Object[][] mapFactoryWrongData() {
return new Object[][]{
{null},
{new Object[]{1, 2, 3}},
};
}
@Test(dataProvider = "isEmptyData")
public void testIsEmpty(final Collection collection, final boolean expected) {
final boolean actual = CollectionUtils.isEmpty(collection);
assertEquals(actual, expected);
}
@Test(dataProvider = "collectionFactoryData")
public void testCollectionFactory(final Collection collection,
final Class<? extends Collection> collectionClass,
final Object[] expected) {
assertNotNull(collection);
assertTrue(collectionClass.isInstance(collection));
assertEquals(collection.size(), expected.length);
assertTrue(collection.containsAll(Arrays.asList(expected)));
}
@Test(dataProvider = "mapFactoryData")
public void testMapFactory(final Map map,
final Class<? extends Map> mapClass,
final Map expected) {
assertNotNull(map);
assertTrue(mapClass.isInstance(map));
assertEquals(map, expected);
}
@Test(dataProvider = "mapFactoryWrongData", expectedExceptions = IllegalArgumentException.class)
public void testMapFactoryWrong(final Object[] values) {
CollectionUtils.newMap(values);
}
}
|
import { QueryInput } from 'types';
export const buildQueryVariables = (queryInput: QueryInput) => ({ countryInput: queryInput });
|
An Emirates Airbus A380 lands on the runway at Manchester Airport at Manchester Airport, northern England June 25, 2013. REUTERS/Phil Noble
By Cyril Altmeyer and Tim Hepher
TOULOUSE (Reuters) - Airbus lost a major order on Wednesday when Dubai's Emirates airline cancelled its entire requirement for 70 A350 jets, dealing an unexpected blow to the European planemaker and engine maker Rolls-Royce.
Shares in Airbus fell more than 3 percent on the loss of some 9 percent of the order book for its newest passenger jet, which is now only months away from entering service after eight years in development at a cost of some $15 billion.
Airbus acknowledged it was disappointed with the loss of the joint second-largest customer for the new twin-engined wide-body aircraft, but said it was confident of filling the spare production slots and did not see any financial impact from the cancellation.
"It is not good news commercially but not bad news financially," Airbus sales chief John Leahy told reporters in Toulouse, adding Boeing has had more cancellations for its 787 Dreamliner.
"There is certainly going to be no hole in production," Leahy said. Emirates was due to receive its first A350 in 2019.
Rolls-Royce, which is the sole engine maker for the A350, said the Emirates decision would result in a 2.6 billion-pound ($4.4 billion) hit to its order book.
"While disappointed with this decision, we are confident that the delivery slots which start towards the end of this decade vacated by Emirates will be taken up by other airlines," Rolls-Royce said in a statement.
Airbus shares were down 3.7 percent at 51.85 euros by 0847 GMT, while Rolls-Royce was down 1.7 percent at 1057 pence.
Airbus said the cancellation followed a "fleet requirement review" as Emirates shifts towards the large A380 superjumbo, for which it is the biggest customer.
Emirates was among the first buyers for the A350 when it placed the order for 50 A350-900 and 20 A350-1000 jets in 2007.
The deal was worth around $16 billion according to 2007 list prices, or close to $22 billion based on the current catalogue, although launch customers typically negotiate large discounts.
The first A350 is due to be delivered to Qatar Airways in the fourth quarter of this year.
"We are confident that we will sell the (production) slots in the coming months," an Airbus spokesman said. "There's no impact on the programme as deliveries were scheduled to start at the end of the decade."
Emirates said the original A350 order had lapsed and that it was reviewing its fleet requirements.
"Emirates was one of the original customers interested in the programme, but always seemed rather critical of the aircraft specifications," Cantor Fitzgerald Europe said in a note.
Aerospace industry analyst Nick Cunningham, managing partner at UK-based Agency Partners, said the move also posed questions over whether Middle Eastern carriers have over-expanded or are expecting lower growth than before.
Airbus and Boeing have dismissed warnings of a 'bubble' in commercial aircraft orders, which remained robust throughout the financial crisis, but some industry bankers say airlines have ordered too many aircraft despite growth in emerging markets.
The A350 decision meanwhile increases the chances that Airbus will revamp its largest models, analysts said.
Emirates has been pushing for Airbus to upgrade the A380 superjumbo with a more efficient engine, after increasing its total orders for the world's biggest passenger jet by 50 in November to 140 planes.
The carrier's chief executive, Tim Clark, said earlier this month that a revamped A380 could have a 10-12 percent performance improvement and that "we are hoping to move on that pretty soon".
Emirates is also in the process of firming up an order worth $76 billion for 150 of Boeing's latest jetliner, the 777X mini-jumbo powered by engines from General Electric.
(Refiled to remove repetitious phrase in paragraph 16)
(Additional reporting by James Regan in Paris, Victoria Bryan in Frankfurt and Nadia Saleem in Dubai; Editing by Christopher Cushing and Greg Mahlich)
|
// ToStringPtr converts an optional string in protobuf to a string pointer.
func ToStringPtr(pbString servicepb.OptionalString) *string {
if pbString.GetNoValue() {
return nil
}
v := pbString.GetData()
return &v
}
|
<filename>src/debug/di/rsmain.cpp
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
//*****************************************************************************
// File: RsMain.cpp
//
// Random RS utility stuff, plus root ICorCordbug implementation
//
//*****************************************************************************
#include "stdafx.h"
#include "primitives.h"
#include "safewrap.h"
#include "check.h"
#include <tlhelp32.h>
#include "wtsapi32.h"
#ifndef SM_REMOTESESSION
#define SM_REMOTESESSION 0x1000
#endif
#include "corpriv.h"
#include "../../dlls/mscorrc/resource.h"
#include <limits.h>
// The top level Cordb object is built around the Shim
#include "shimpriv.h"
//-----------------------------------------------------------------------------
// For debugging ease, cache some global values.
// Include these in retail & free because that's where we need them the most!!
// Optimized builds may not let us view locals & parameters. So Having these
// cached as global values should let us inspect almost all of
// the interesting parts of the RS even in a Retail build!
//-----------------------------------------------------------------------------
RSDebuggingInfo g_RSDebuggingInfo_OutOfProc = {0 }; // set to NULL
RSDebuggingInfo * g_pRSDebuggingInfo = &g_RSDebuggingInfo_OutOfProc;
// The following instances are used for invoking overloaded new/delete
forDbiWorker forDbi;
#ifdef _DEBUG
// For logs, we can print the string name for the debug codes.
const char * GetDebugCodeName(DWORD dwCode)
{
if (dwCode < 1 || dwCode > 9)
{
return "!Invalid Debug Event Code!";
}
static const char * const szNames[] = {
"(1) EXCEPTION_DEBUG_EVENT",
"(2) CREATE_THREAD_DEBUG_EVENT",
"(3) CREATE_PROCESS_DEBUG_EVENT",
"(4) EXIT_THREAD_DEBUG_EVENT",
"(5) EXIT_PROCESS_DEBUG_EVENT",
"(6) LOAD_DLL_DEBUG_EVENT",
"(7) UNLOAD_DLL_DEBUG_EVENT",
"(8) OUTPUT_DEBUG_STRING_EVENT",
"(9) RIP_EVENT",// <-- only on Win9X
};
return szNames[dwCode - 1];
}
#endif
//-----------------------------------------------------------------------------
// Per-thread state for Debug builds...
//-----------------------------------------------------------------------------
#ifdef RSCONTRACTS
DWORD DbgRSThread::s_TlsSlot = TLS_OUT_OF_INDEXES;
LONG DbgRSThread::s_Total = 0;
DbgRSThread::DbgRSThread()
{
m_cInsideRS = 0;
m_fIsInCallback = false;
m_fIsUnrecoverableErrorCallback = false;
m_cTotalDbgApiLocks = 0;
for(int i = 0; i < RSLock::LL_MAX; i++)
{
m_cLocks[i] = 0;
}
// Initialize Identity info
m_Cookie = COOKIE_VALUE;
m_tid = GetCurrentThreadId();
}
// NotifyTakeLock & NotifyReleaseLock are called by RSLock to update the per-thread locking context.
// This will assert if the operation is unsafe (ie, violates lock order).
void DbgRSThread::NotifyTakeLock(RSLock * pLock)
{
if (pLock->HasLock())
{
return;
}
int iLevel = pLock->GetLevel();
// Is it safe to take this lock?
// Must take "bigger" locks first. We shouldn't hold any locks at our current level either.
// If this lock is re-entrant and we're double-taking it, we would have returned already.
// And the locking model on the RS forbids taking multiple locks at the same level.
for(int i = iLevel; i >= 0; i --)
{
bool fHasLowerLock = m_cLocks[i] > 0;
CONSISTENCY_CHECK_MSGF(!fHasLowerLock, (
"RSLock violation. Trying to take lock '%s (%d)', but already have smaller lock at level %d'\n",
pLock->Name(), iLevel,
i));
}
// Update the counts
_ASSERTE(m_cLocks[iLevel] == 0);
m_cLocks[iLevel]++;
if (pLock->IsDbgApiLock())
m_cTotalDbgApiLocks++;
}
void DbgRSThread::NotifyReleaseLock(RSLock * pLock)
{
if (pLock->HasLock())
{
return;
}
int iLevel = pLock->GetLevel();
m_cLocks[iLevel]--;
_ASSERTE(m_cLocks[iLevel] == 0);
if (pLock->IsDbgApiLock())
m_cTotalDbgApiLocks--;
_ASSERTE(m_cTotalDbgApiLocks >= 0);
}
void DbgRSThread::TakeVirtualLock(RSLock::ERSLockLevel level)
{
m_cLocks[level]++;
}
void DbgRSThread::ReleaseVirtualLock(RSLock::ERSLockLevel level)
{
m_cLocks[level]--;
_ASSERTE(m_cLocks[level] >= 0);
}
// Get a DbgRSThread for the current OS thread id; lazily create if needed.
DbgRSThread * DbgRSThread::GetThread()
{
_ASSERTE(DbgRSThread::s_TlsSlot != TLS_OUT_OF_INDEXES);
void * p2 = TlsGetValue(DbgRSThread::s_TlsSlot);
if (p2 == NULL)
{
// We lazily create for threads that haven't gone through DllMain
// Since this is per-thread, we don't need to lock.
p2 = DbgRSThread::Create();
}
DbgRSThread * p = reinterpret_cast<DbgRSThread*> (p2);
_ASSERTE(p->m_Cookie == COOKIE_VALUE);
return p;
}
#endif // RSCONTRACTS
#ifdef _DEBUG
LONG CordbCommonBase::s_TotalObjectCount = 0;
LONG CordbCommonBase::s_CordbObjectUID = 0;
LONG CordbCommonBase::m_saDwInstance[enumMaxDerived];
LONG CordbCommonBase::m_saDwAlive[enumMaxDerived];
PVOID CordbCommonBase::m_sdThis[enumMaxDerived][enumMaxThis];
#endif
#ifdef _DEBUG_IMPL
// Mem tracking
LONG Cordb::s_DbgMemTotalOutstandingCordb = 0;
LONG Cordb::s_DbgMemTotalOutstandingInternalRefs = 0;
#endif
#ifdef TRACK_OUTSTANDING_OBJECTS
void *Cordb::s_DbgMemOutstandingObjects[MAX_TRACKED_OUTSTANDING_OBJECTS] = { NULL };
LONG Cordb::s_DbgMemOutstandingObjectMax = 0;
#endif
// Default implementation for neutering left-side resources.
void CordbBase::NeuterLeftSideResources()
{
LIMITED_METHOD_CONTRACT;
RSLockHolder lockHolder(GetProcess()->GetProcessLock());
Neuter();
}
// Default implementation for neutering.
// All derived objects should eventually chain to this.
void CordbBase::Neuter()
{
// Neutering occurs under the process lock. Neuter can be called twice
// and so locking protects against races in double-delete.
// @dbgtodo - , some CordbBase objects (Cordb, CordbProcessEnum),
// don't have process affinity these should eventually be hoisted to the shim,
// and then we can enforce.
CordbProcess * pProcess = GetProcess();
if (pProcess != NULL)
{
_ASSERTE(pProcess->ThreadHoldsProcessLock());
}
CordbCommonBase::Neuter();
}
//-----------------------------------------------------------------------------
// NeuterLists
//-----------------------------------------------------------------------------
NeuterList::NeuterList()
{
m_pHead = NULL;
}
NeuterList::~NeuterList()
{
// Our owner should have neutered us before deleting us.
// Thus we should be empty.
CONSISTENCY_CHECK_MSGF(m_pHead == NULL, ("NeuterList not empty on shutdown. this=0x%p", this));
}
// Wrapper around code:NeuterList::UnsafeAdd
void NeuterList::Add(CordbProcess * pProcess, CordbBase * pObject)
{
CONTRACTL
{
THROWS;
}
CONTRACTL_END;
UnsafeAdd(pProcess, pObject);
}
//
// Add an object to be neutered.
//
// Arguments:
// pProcess - process that holds lock that will protect the neuter list
// pObject - object to add
//
// Returns:
// Throws on error.
//
// Notes:
// This will add it to the list and maintain an internal reference to it.
// This will take the process lock.
//
void NeuterList::UnsafeAdd(CordbProcess * pProcess, CordbBase * pObject)
{
_ASSERTE(pObject != NULL);
// Lock if needed.
RSLock * pLock = (pProcess != NULL) ? pProcess->GetProcessLock() : NULL;
RSLockHolder lockHolder(pLock, FALSE);
if (pLock != NULL) lockHolder.Acquire();
Node * pNode = new Node(); // throws on error.
pNode->m_pObject.Assign(pObject);
pNode->m_pNext = m_pHead;
m_pHead = pNode;
}
// Neuter everything on the list and clear it
//
// Arguments:
// pProcess - process tree that this neuterlist belongs in
// ticket - neuter ticket proving caller ensured we're safe to neuter.
//
// Assumptions:
// Caller ensures we're safe to neuter (required to obtain NeuterTicket)
//
// Notes:
// This will release all internal references and empty the list.
void NeuterList::NeuterAndClear(CordbProcess * pProcess)
{
RSLock * pLock = (pProcess != NULL) ? pProcess->GetProcessLock() : NULL;
(void)pLock; //prevent "unused variable" error from GCC
_ASSERTE((pLock == NULL) || pLock->HasLock());
while (m_pHead != NULL)
{
Node * pTemp = m_pHead;
m_pHead = m_pHead->m_pNext;
pTemp->m_pObject->Neuter();
delete pTemp; // will implicitly release
}
}
// Only neuter objects that are marked.
// Removes neutered objects from the list.
void NeuterList::SweepAllNeuterAtWillObjects(CordbProcess * pProcess)
{
_ASSERTE(pProcess != NULL);
RSLock * pLock = pProcess->GetProcessLock();
RSLockHolder lockHolder(pLock);
Node ** ppLast = &m_pHead;
Node * pCur = m_pHead;
while (pCur != NULL)
{
CordbBase * pObject = pCur->m_pObject;
if (pObject->IsNeuterAtWill() || pObject->IsNeutered())
{
// Delete
pObject->Neuter();
Node * pNext = pCur->m_pNext;
delete pCur; // dtor will implicitly release the internal ref to pObject
pCur = *ppLast = pNext;
}
else
{
// Move to next.
ppLast = &pCur->m_pNext;
pCur = pCur->m_pNext;
}
}
}
//-----------------------------------------------------------------------------
// Neuters all objects in the list and empties the list.
//
// Notes:
// See also code:LeftSideResourceCleanupList::SweepNeuterLeftSideResources,
// which only neuters objects that have been marked as NeuterAtWill (external
// ref count has gone to 0).
void LeftSideResourceCleanupList::NeuterLeftSideResourcesAndClear(CordbProcess * pProcess)
{
// Traversal protected under Process-lock.
// SG-lock must already be held to do neutering.
// Stop-Go lock is bigger than Process-lock.
// Neutering requires the Stop-Go lock (until we get rid of IPC events)
// But we want to be able to add to the Neuter list under the Process-lock.
// So we just need to protected m_pHead under process-lock.
// "Privatize" the list under the lock.
_ASSERTE(pProcess != NULL);
RSLock * pLock = pProcess->GetProcessLock();
Node * pCur = NULL;
{
RSLockHolder lockHolder(pLock); // only acquire lock if we have one
pCur = m_pHead;
m_pHead = NULL;
}
// @dbgtodo - eventually everything can be under the process lock.
_ASSERTE(!pLock->HasLock()); // Can't hold Process lock while calling NeuterLeftSideResources
// Now we're operating on local data, so traversing doesn't need to be under the lock.
while (pCur != NULL)
{
Node * pTemp = pCur;
pCur = pCur->m_pNext;
pTemp->m_pObject->NeuterLeftSideResources();
delete pTemp; // will implicitly release
}
}
//-----------------------------------------------------------------------------
// Only neuter objects that are marked. Removes neutered objects from the list.
//
// Arguments:
// pProcess - non-null process owning the objects in the list
//
// Notes:
// this cleans up left-side resources held by objects in the list.
// It may send IPC events to do this.
void LeftSideResourceCleanupList::SweepNeuterLeftSideResources(CordbProcess * pProcess)
{
_ASSERTE(pProcess != NULL);
// Must be safe to send IPC events.
_ASSERTE(pProcess->GetStopGoLock()->HasLock()); // holds this for neutering
_ASSERTE(pProcess->GetSynchronized());
RSLock * pLock = pProcess->GetProcessLock();
// Lock while we "privatize" the head.
RSLockHolder lockHolder(pLock);
Node * pHead = m_pHead;
m_pHead = NULL;
lockHolder.Release();
Node ** ppLast = &pHead;
Node * pCur = pHead;
// Can't hold the process-lock while calling Neuter.
while (pCur != NULL)
{
CordbBase * pObject = pCur->m_pObject;
if (pObject->IsNeuterAtWill() || pObject->IsNeutered())
{
// HeavyNueter can not be done under the process-lock because
// it may take the Stop-Go lock and send events.
pObject->NeuterLeftSideResources();
// Delete
Node * pNext = pCur->m_pNext;
delete pCur; // dtor will implicitly release the internal ref to pObject
pCur = *ppLast = pNext;
}
else
{
// Move to next.
ppLast = &pCur->m_pNext;
pCur = pCur->m_pNext;
}
}
// Now link back in. m_pHead may have changed while we were unlocked.
// The list does not need to be ordered.
lockHolder.Acquire();
*ppLast = m_pHead;
m_pHead = pHead;
}
/* ------------------------------------------------------------------------- *
* CordbBase class
* ------------------------------------------------------------------------- */
// Do any initialization necessary for both CorPublish and CorDebug
// This includes enabling logging and adding the SEDebug priv.
void CordbCommonBase::InitializeCommon()
{
static bool IsInitialized = false;
if( IsInitialized )
{
return;
}
#ifdef STRESS_LOG
{
bool fStressLog = false;
#ifdef _DEBUG
// stress logging defaults to on in debug builds
fStressLog = true;
#endif // DEBUG
// StressLog will turn on stress logging for the entire runtime.
// RSStressLog is only used here and only affects the RS.
fStressLog =
(REGUTIL::GetConfigDWORD_DontUse_(CLRConfig::UNSUPPORTED_StressLog, fStressLog) != 0) ||
(CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_RSStressLog) != 0);
if (fStressLog == true)
{
unsigned facilities = REGUTIL::GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_LogFacility, LF_ALL);
unsigned level = REGUTIL::GetConfigDWORD_DontUse_(CLRConfig::EXTERNAL_LogLevel, LL_INFO1000);
unsigned bytesPerThread = REGUTIL::GetConfigDWORD_DontUse_(CLRConfig::UNSUPPORTED_StressLogSize, STRESSLOG_CHUNK_SIZE * 2);
unsigned totalBytes = REGUTIL::GetConfigDWORD_DontUse_(CLRConfig::UNSUPPORTED_TotalStressLogSize, STRESSLOG_CHUNK_SIZE * 1024);
#ifndef FEATURE_PAL
StressLog::Initialize(facilities, level, bytesPerThread, totalBytes, GetModuleInst());
#else
StressLog::Initialize(facilities, level, bytesPerThread, totalBytes, NULL);
#endif
}
}
#endif // STRESS_LOG
#ifdef LOGGING
InitializeLogging();
#endif
// Add debug privilege. This will let us call OpenProcess() on anything, regardless of ACL.
AddDebugPrivilege();
IsInitialized = true;
}
// Adjust the permissions of this process to ensure that we have
// the debugging privilege. If we can't make the adjustment, it
// only means that we won't be able to attach to a service under
// NT, so we won't treat that as a critical failure.
// This also will let us call OpenProcess() on anything, regardless of DACL. This allows an
// Admin debugger to attach to a debuggee in the guest account.
// Ideally, the debugger would set this (and we wouldn't mess with privileges at all). However, we've been
// setting this since V1.0 and removing it may be a breaking change.
void CordbCommonBase::AddDebugPrivilege()
{
#ifndef FEATURE_PAL
HANDLE hToken;
TOKEN_PRIVILEGES Privileges;
BOOL fSucc;
LUID SeDebugLuid = {0, 0};
fSucc = LookupPrivilegeValueW(NULL, SE_DEBUG_NAME, &SeDebugLuid);
DWORD err = GetLastError();
if (!fSucc)
{
STRESS_LOG1(LF_CORDB, LL_INFO1000, "Unable to adjust permissions of this process to include SE_DEBUG. Lookup failed %d\n", err);
return;
}
// Retrieve a handle of the access token
fSucc = OpenProcessToken(GetCurrentProcess(),
TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY,
&hToken);
if (fSucc)
{
Privileges.PrivilegeCount = 1;
Privileges.Privileges[0].Luid = SeDebugLuid;
Privileges.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
AdjustTokenPrivileges(hToken,
FALSE,
&Privileges,
sizeof(TOKEN_PRIVILEGES),
(PTOKEN_PRIVILEGES) NULL,
(PDWORD) NULL);
err = GetLastError();
// The return value of AdjustTokenPrivileges alone does not indicate whether the privilege was adjusted; check GetLastError.
if (err != ERROR_SUCCESS)
{
STRESS_LOG1(LF_CORDB, LL_INFO1000,
"Unable to adjust permissions of this process to include SE_DEBUG. Adjust failed %d\n", err);
}
else
{
LOG((LF_CORDB, LL_INFO1000, "Adjusted process permissions to include SE_DEBUG.\n"));
}
CloseHandle(hToken);
}
#endif
}
namespace
{
//
// DefaultManagedCallback2
//
// In the event that the debugger is of an older version than the Right Side & Left Side, the Right Side may issue
// new callbacks that the debugger is not expecting. In this case, we need to provide a default behavior for those
// new callbacks, if for no other reason than to force the debugger to Continue().
//
class DefaultManagedCallback2 : public ICorDebugManagedCallback2
{
public:
DefaultManagedCallback2(ICorDebug* pDebug);
virtual ~DefaultManagedCallback2() { }
virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, void** pInterface);
virtual ULONG STDMETHODCALLTYPE AddRef();
virtual ULONG STDMETHODCALLTYPE Release();
COM_METHOD FunctionRemapOpportunity(ICorDebugAppDomain* pAppDomain,
ICorDebugThread* pThread,
ICorDebugFunction* pOldFunction,
ICorDebugFunction* pNewFunction,
ULONG32 oldILOffset);
COM_METHOD FunctionRemapComplete(ICorDebugAppDomain *pAppDomain,
ICorDebugThread *pThread,
ICorDebugFunction *pFunction);
COM_METHOD CreateConnection(ICorDebugProcess *pProcess,
CONNID dwConnectionId,
__in_z WCHAR* pConnectionName);
COM_METHOD ChangeConnection(ICorDebugProcess *pProcess, CONNID dwConnectionId);
COM_METHOD DestroyConnection(ICorDebugProcess *pProcess, CONNID dwConnectionId);
COM_METHOD Exception(ICorDebugAppDomain *pAppDomain,
ICorDebugThread *pThread,
ICorDebugFrame *pFrame,
ULONG32 nOffset,
CorDebugExceptionCallbackType eventType,
DWORD dwFlags );
COM_METHOD ExceptionUnwind(ICorDebugAppDomain *pAppDomain,
ICorDebugThread *pThread,
CorDebugExceptionUnwindCallbackType eventType,
DWORD dwFlags );
COM_METHOD MDANotification(
ICorDebugController * pController,
ICorDebugThread *pThread,
ICorDebugMDA * pMDA
) { return E_NOTIMPL; }
private:
// not implemented
DefaultManagedCallback2(const DefaultManagedCallback2&);
DefaultManagedCallback2& operator=(const DefaultManagedCallback2&);
ICorDebug* m_pDebug;
LONG m_refCount;
};
DefaultManagedCallback2::DefaultManagedCallback2(ICorDebug* pDebug) : m_pDebug(pDebug), m_refCount(0)
{
}
HRESULT
DefaultManagedCallback2::QueryInterface(REFIID iid, void** pInterface)
{
if (IID_ICorDebugManagedCallback2 == iid)
{
*pInterface = static_cast<ICorDebugManagedCallback2*>(this);
}
else if (IID_IUnknown == iid)
{
*pInterface = static_cast<IUnknown*>(this);
}
else
{
*pInterface = NULL;
return E_NOINTERFACE;
}
this->AddRef();
return S_OK;
}
ULONG
DefaultManagedCallback2::AddRef()
{
return InterlockedIncrement(&m_refCount);
}
ULONG
DefaultManagedCallback2::Release()
{
ULONG ulRef = InterlockedDecrement(&m_refCount);
if (0 == ulRef)
{
delete this;
}
return ulRef;
}
HRESULT
DefaultManagedCallback2::FunctionRemapOpportunity(ICorDebugAppDomain* pAppDomain,
ICorDebugThread* pThread,
ICorDebugFunction* pOldFunction,
ICorDebugFunction* pNewFunction,
ULONG32 oldILOffset)
{
//
// In theory, this function should never be reached. To get here, we'd have to have a debugger which doesn't
// support edit and continue somehow turn on edit & continue features.
//
_ASSERTE(!"Edit & Continue callback reached when debugger doesn't support Edit And Continue");
// If you ignore this assertion, or you're in a retail build, there are two options as far as how to proceed
// from this point
// o We can do nothing, and let the debuggee process hang, or
// o We can silently ignore the FunctionRemapOpportunity, and tell the debuggee to Continue running.
//
// For now, we'll silently ignore the function remapping.
pAppDomain->Continue(false);
pAppDomain->Release();
return S_OK;
}
HRESULT
DefaultManagedCallback2::FunctionRemapComplete(ICorDebugAppDomain *pAppDomain,
ICorDebugThread *pThread,
ICorDebugFunction *pFunction)
{
//
// In theory, this function should never be reached. To get here, we'd have to have a debugger which doesn't
// support edit and continue somehow turn on edit & continue features.
//
_ASSERTE(!"Edit & Continue callback reached when debugger doesn't support Edit And Continue");
return E_NOTIMPL;
}
//
// <TODO>
// These methods are currently left unimplemented.
//
// Create/Change/Destroy Connection *should* force the Process/AppDomain/Thread to Continue(). Currently the
// arguments to these functions don't provide the relevant Process/AppDomain/Thread, so there is no way to figure
// out which Threads should be forced to Continue().
//
// </TODO>
//
HRESULT
DefaultManagedCallback2::CreateConnection(ICorDebugProcess *pProcess,
CONNID dwConnectionId,
__in_z WCHAR* pConnectionName)
{
_ASSERTE(!"DefaultManagedCallback2::CreateConnection not implemented");
return E_NOTIMPL;
}
HRESULT
DefaultManagedCallback2::ChangeConnection(ICorDebugProcess *pProcess, CONNID dwConnectionId)
{
_ASSERTE(!"DefaultManagedCallback2::ChangeConnection not implemented");
return E_NOTIMPL;
}
HRESULT
DefaultManagedCallback2::DestroyConnection(ICorDebugProcess *pProcess, CONNID dwConnectionId)
{
_ASSERTE(!"DefaultManagedCallback2::DestroyConnection not implemented");
return E_NOTIMPL;
}
HRESULT
DefaultManagedCallback2::Exception(ICorDebugAppDomain *pAppDomain,
ICorDebugThread *pThread,
ICorDebugFrame *pFrame,
ULONG32 nOffset,
CorDebugExceptionCallbackType eventType,
DWORD dwFlags )
{
//
// Just ignore and continue the process.
//
pAppDomain->Continue(false);
return S_OK;
}
HRESULT
DefaultManagedCallback2::ExceptionUnwind(ICorDebugAppDomain *pAppDomain,
ICorDebugThread *pThread,
CorDebugExceptionUnwindCallbackType eventType,
DWORD dwFlags )
{
//
// Just ignore and continue the process.
//
pAppDomain->Continue(false);
return S_OK;
}
//
// DefaultManagedCallback3
//
// In the event that the debugger is of an older version than the Right Side & Left Side, the Right Side may issue
// new callbacks that the debugger is not expecting. In this case, we need to provide a default behavior for those
// new callbacks, if for no other reason than to force the debugger to Continue().
//
class DefaultManagedCallback3 : public ICorDebugManagedCallback3
{
public:
DefaultManagedCallback3(ICorDebug* pDebug);
virtual ~DefaultManagedCallback3() { }
virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, void** pInterface);
virtual ULONG STDMETHODCALLTYPE AddRef();
virtual ULONG STDMETHODCALLTYPE Release();
COM_METHOD CustomNotification(ICorDebugThread * pThread, ICorDebugAppDomain * pAppDomain);
private:
// not implemented
DefaultManagedCallback3(const DefaultManagedCallback3&);
DefaultManagedCallback3& operator=(const DefaultManagedCallback3&);
ICorDebug* m_pDebug;
LONG m_refCount;
};
DefaultManagedCallback3::DefaultManagedCallback3(ICorDebug* pDebug) : m_pDebug(pDebug), m_refCount(0)
{
}
HRESULT
DefaultManagedCallback3::QueryInterface(REFIID iid, void** pInterface)
{
if (IID_ICorDebugManagedCallback3 == iid)
{
*pInterface = static_cast<ICorDebugManagedCallback3*>(this);
}
else if (IID_IUnknown == iid)
{
*pInterface = static_cast<IUnknown*>(this);
}
else
{
*pInterface = NULL;
return E_NOINTERFACE;
}
this->AddRef();
return S_OK;
}
ULONG
DefaultManagedCallback3::AddRef()
{
return InterlockedIncrement(&m_refCount);
}
ULONG
DefaultManagedCallback3::Release()
{
ULONG ulRef = InterlockedDecrement(&m_refCount);
if (0 == ulRef)
{
delete this;
}
return ulRef;
}
HRESULT
DefaultManagedCallback3::CustomNotification(ICorDebugThread * pThread, ICorDebugAppDomain * pAppDomain)
{
//
// Just ignore and continue the process.
//
pAppDomain->Continue(false);
return S_OK;
}
//
// DefaultManagedCallback4
//
// In the event that the debugger is of an older version than the Right Side & Left Side, the Right Side may issue
// new callbacks that the debugger is not expecting. In this case, we need to provide a default behavior for those
// new callbacks, if for no other reason than to force the debugger to Continue().
//
class DefaultManagedCallback4 : public ICorDebugManagedCallback4
{
public:
DefaultManagedCallback4(ICorDebug* pDebug);
virtual ~DefaultManagedCallback4() { }
virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, void** pInterface);
virtual ULONG STDMETHODCALLTYPE AddRef();
virtual ULONG STDMETHODCALLTYPE Release();
COM_METHOD BeforeGarbageCollection(ICorDebugProcess* pProcess);
COM_METHOD AfterGarbageCollection(ICorDebugProcess* pProcess);
COM_METHOD DataBreakpoint(ICorDebugProcess* pProcess, ICorDebugThread* pThread, BYTE* pContext, ULONG32 contextSize);
private:
// not implemented
DefaultManagedCallback4(const DefaultManagedCallback4&);
DefaultManagedCallback4& operator=(const DefaultManagedCallback4&);
ICorDebug* m_pDebug;
LONG m_refCount;
};
DefaultManagedCallback4::DefaultManagedCallback4(ICorDebug* pDebug) : m_pDebug(pDebug), m_refCount(0)
{
}
HRESULT
DefaultManagedCallback4::QueryInterface(REFIID iid, void** pInterface)
{
if (IID_ICorDebugManagedCallback4 == iid)
{
*pInterface = static_cast<ICorDebugManagedCallback4*>(this);
}
else if (IID_IUnknown == iid)
{
*pInterface = static_cast<IUnknown*>(this);
}
else
{
*pInterface = NULL;
return E_NOINTERFACE;
}
this->AddRef();
return S_OK;
}
ULONG
DefaultManagedCallback4::AddRef()
{
return InterlockedIncrement(&m_refCount);
}
ULONG
DefaultManagedCallback4::Release()
{
ULONG ulRef = InterlockedDecrement(&m_refCount);
if (0 == ulRef)
{
delete this;
}
return ulRef;
}
HRESULT
DefaultManagedCallback4::BeforeGarbageCollection(ICorDebugProcess* pProcess)
{
//
// Just ignore and continue the process.
//
pProcess->Continue(false);
return S_OK;
}
HRESULT
DefaultManagedCallback4::AfterGarbageCollection(ICorDebugProcess* pProcess)
{
//
// Just ignore and continue the process.
//
pProcess->Continue(false);
return S_OK;
}
HRESULT
DefaultManagedCallback4::DataBreakpoint(ICorDebugProcess* pProcess, ICorDebugThread* pThread, BYTE* pContext, ULONG32 contextSize)
{
//
// Just ignore and continue the process.
//
pProcess->Continue(false);
return S_OK;
}
}
/* ------------------------------------------------------------------------- *
* Cordb class
* ------------------------------------------------------------------------- */
Cordb::Cordb(CorDebugInterfaceVersion iDebuggerVersion)
: Cordb(iDebuggerVersion, ProcessDescriptor::CreateUninitialized())
{
}
Cordb::Cordb(CorDebugInterfaceVersion iDebuggerVersion, const ProcessDescriptor& pd)
: CordbBase(NULL, 0, enumCordb),
m_processes(11),
m_initialized(false),
m_debuggerSpecifiedVersion(iDebuggerVersion),
m_pd(pd)
#ifdef FEATURE_CORESYSTEM
,
m_targetCLR(0)
#endif
{
g_pRSDebuggingInfo->m_Cordb = this;
#ifdef _DEBUG_IMPL
// Memory leak detection
InterlockedIncrement(&s_DbgMemTotalOutstandingCordb);
#endif
}
Cordb::~Cordb()
{
LOG((LF_CORDB, LL_INFO10, "C::~C Terminating Cordb object.\n"));
if (m_pd.m_ApplicationGroupId != NULL)
{
delete [] m_pd.m_ApplicationGroupId;
}
g_pRSDebuggingInfo->m_Cordb = NULL;
}
void Cordb::Neuter()
{
if (this->IsNeutered())
{
return;
}
RSLockHolder lockHolder(&m_processListMutex);
m_pProcessEnumList.NeuterAndClear(NULL);
HRESULT hr = S_OK;
EX_TRY // @dbgtodo push this up.
{
// Iterating needs to be done under the processList lock (small), while neutering
// needs to be able to take the process lock (big).
RSPtrArray<CordbProcess> list;
m_processes.TransferToArray(&list); // throws
// can't hold list lock while calling CordbProcess::Neuter (which
// will take the Process-lock).
lockHolder.Release();
list.NeuterAndClear();
// List dtor calls release on each element
}
EX_CATCH_HRESULT(hr);
SIMPLIFYING_ASSUMPTION_SUCCEEDED(hr);
CordbCommonBase::Neuter();
// Implicit release from smart ptr.
}
#ifdef _DEBUG_IMPL
void CheckMemLeaks()
{
// Memory leak detection.
LONG l = InterlockedDecrement(&Cordb::s_DbgMemTotalOutstandingCordb);
if (l == 0)
{
// If we just released our final Cordb root object, then we expect no internal references at all.
// Note that there may still be external references (and thus not all objects may have been
// deleted yet).
bool fLeakedInternal = (Cordb::s_DbgMemTotalOutstandingInternalRefs > 0);
// Some Cordb objects (such as CordbValues) may not be rooted, and thus we can't neuter
// them and thus an external ref may keep them alive. Since these objects may have internal refs,
// This means that external refs can keep internal refs alive.
// Thus this assert must be tempered if unrooted objects are leaked (but that means we can always
// assert the tempered version, regardless of bugs in Cordbg).
CONSISTENCY_CHECK_MSGF(!fLeakedInternal,
("'%d' Outstanding internal references at final Cordb::Terminate\n",
Cordb::s_DbgMemTotalOutstandingInternalRefs));
DWORD dLeakCheck = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_DbgLeakCheck);
if (dLeakCheck > 0)
{
// We have 1 ref for this Cordb root object. All other refs should have been deleted.
CONSISTENCY_CHECK_MSGF(Cordb::s_TotalObjectCount == 1, ("'%d' total cordbBase objects are leaked.\n",
Cordb::s_TotalObjectCount-1));
}
}
}
#endif
// This shuts down ICorDebug.
// All CordbProcess objects owned by this Cordb object must have either:
// - returned from a Detach() call
// - returned from dispatching the ExitProcess() callback.
// In both cases, CordbProcess::NeuterChildren has been called, although the Process object itself
// may not yet be neutered. This condition will ensure that the CordbProcess objects don't need
// any resources that we're about to release.
HRESULT Cordb::Terminate()
{
LOG((LF_CORDB, LL_INFO10000, "[%x] Terminating Cordb\n", GetCurrentThreadId()));
if (!m_initialized)
return E_FAIL;
FAIL_IF_NEUTERED(this);
// We can't terminate the debugging services from within a callback.
// Caller is supposed to be out of all callbacks when they call this.
// This also avoids a deadlock because we'll shutdown the RCET, which would block if we're
// in the RCET.
if (m_rcEventThread->IsRCEventThread())
{
STRESS_LOG0(LF_CORDB, LL_INFO10, "C::T: failed on RCET\n");
_ASSERTE(!"Gross API Misuse: Debugger shouldn't call ICorDebug::Terminate from within a managed callback.");
return CORDBG_E_CANT_CALL_ON_THIS_THREAD;
}
// @todo - do we need to throw some switch to prevent new processes from being added now?
// VS must stop all debugging before terminating. Fail if we have any non-neutered processes
// (b/c those processes should have been either shutdown or detached).
// We are in an undefined state if this check fails.
// Processes are removed from this list before Process::Detach() returns and before the ExitProcess callback is dispatched.
// Thus any processes in this list should be live or have an unrecoverable error.
{
RSLockHolder ch(&m_processListMutex);
HASHFIND hfDT;
CordbProcess * pProcess;
for (pProcess= (CordbProcess*) m_processes.FindFirst(&hfDT);
pProcess != NULL;
pProcess = (CordbProcess*) m_processes.FindNext(&hfDT))
{
_ASSERTE(pProcess->IsSafeToSendEvents() || pProcess->m_unrecoverableError);
if (pProcess->IsSafeToSendEvents() && !pProcess->m_unrecoverableError)
{
CONSISTENCY_CHECK_MSGF(false, ("Gross API misuse. Calling Terminate with live process:0x%p\n", pProcess));
STRESS_LOG1(LF_CORDB, LL_INFO10, "Cordb::Terminate b/c of non-neutered process '%p'\n", pProcess);
// This is very bad.
// GROSS API MISUSES - Debugger is calling ICorDebug::Terminate while there
// are still outstanding (non-neutered) ICorDebugProcess.
// ICorDebug is now in an undefined state.
// We will also leak memory b/c we're leaving the EventThreads up (which will in turn
// keep a reference to this Cordb object).
return ErrWrapper(CORDBG_E_ILLEGAL_SHUTDOWN_ORDER);
}
}
}
// @todo- ideally, we'd wait for all threads to get outside of ICorDebug before we proceed.
// That's tough to implement in practice, but we at least wait for both event threads to exit. As these
// threads dispatch callbacks, that means we'll wait at least until VS is outside of any callback.
//
// Stop the event handling threads.
//
if (m_rcEventThread != NULL)
{
// Stop may do significant work b/c it drains the worker queue.
m_rcEventThread->Stop();
delete m_rcEventThread;
m_rcEventThread = NULL;
}
#ifdef _DEBUG
// @todo - this disables thread-safety asserts on the process-list-hash. We clearly
// can't hold the lock while neutering it. (lock violation since who knows what neuter may do)
// @todo- we may have races between Cordb::Terminate and Cordb::CreateProcess as both
// modify the process list. This is mitigated since Terminate is supposed to be the last method called.
m_processes.DebugSetRSLock(NULL);
#endif
//
// We expect the debugger to neuter all processes before calling Terminate(), so do not neuter them here.
//
#ifdef _DEBUG
{
HASHFIND find;
_ASSERTE(m_processes.FindFirst(&find) == NULL); // should be emptied by neuter
}
#endif //_DEBUG
// Officially mark us as neutered.
this->Neuter();
m_processListMutex.Destroy();
//
// Release the callbacks
//
m_managedCallback.Clear();
m_managedCallback2.Clear();
m_managedCallback3.Clear();
m_managedCallback4.Clear();
m_unmanagedCallback.Clear();
// The Shell may still have outstanding references, so we don't want to shutdown logging yet.
// But everything should be neutered anyways.
m_initialized = FALSE;
// After this, all outstanding Cordb objects should be neutered.
LOG((LF_CORDB, LL_EVERYTHING, "Cordb finished terminating.\n"));
#if defined(_DEBUG)
//
// Assert that there are no outstanding object references within the debugging
// API itself.
//
CheckMemLeaks();
#endif
return S_OK;
}
HRESULT Cordb::QueryInterface(REFIID id, void **pInterface)
{
if (id == IID_ICorDebug)
*pInterface = static_cast<ICorDebug*>(this);
else if (id == IID_IUnknown)
*pInterface = static_cast<IUnknown*>(static_cast<ICorDebug*>(this));
else
{
*pInterface = NULL;
return E_NOINTERFACE;
}
ExternalAddRef();
return S_OK;
}
//
// Initialize -- setup the ICorDebug object by creating any objects
// that the object needs to operate and starting the two needed IPC
// threads.
//
HRESULT Cordb::Initialize(void)
{
HRESULT hr = S_OK;
FAIL_IF_NEUTERED(this);
if (!m_initialized)
{
CordbCommonBase::InitializeCommon();
// Since logging wasn't active when we called CordbBase, do it now.
LOG((LF_CORDB, LL_EVERYTHING, "Memory: CordbBase object allocated: this=%p, count=%d, RootObject\n", this, s_TotalObjectCount));
LOG((LF_CORDB, LL_INFO10, "Initializing ICorDebug...\n"));
// Ensure someone hasn't messed up the IPC buffer size
_ASSERTE(sizeof(DebuggerIPCEvent) <= CorDBIPC_BUFFER_SIZE);
//
// Init things that the Cordb will need to operate
//
m_processListMutex.Init("Process-List Lock", RSLock::cLockReentrant, RSLock::LL_PROCESS_LIST_LOCK);
#ifdef _DEBUG
m_processes.DebugSetRSLock(&m_processListMutex);
#endif
//
// Create the runtime controller event listening thread
//
m_rcEventThread = new (nothrow) CordbRCEventThread(this);
if (m_rcEventThread == NULL)
{
hr = E_OUTOFMEMORY;
}
else
{
// This stuff only creates events & starts the thread
hr = m_rcEventThread->Init();
if (SUCCEEDED(hr))
hr = m_rcEventThread->Start();
if (FAILED(hr))
{
delete m_rcEventThread;
m_rcEventThread = NULL;
}
}
if (FAILED(hr))
goto exit;
m_initialized = TRUE;
}
exit:
return hr;
}
//---------------------------------------------------------------------------------------
//
// Throw if no more processes can be debugged with this Cordb object.
//
// Notes:
// This is highly dependent on the wait sets in the Win32 & RCET threads.
// @dbgtodo- this will end up in the shim.
void Cordb::EnsureAllowAnotherProcess()
{
CONTRACTL
{
THROWS;
}
CONTRACTL_END;
RSLockHolder ch(&m_processListMutex);
// Cordb, Win32, and RCET all have process sets, but Cordb's is the
// best count of total debuggees. The RCET set is volatile (processes
// are added / removed when they become synchronized), and Win32's set
// doesn't include all processes.
int cCurProcess = GetProcessList()->GetCount();
// In order to accept another debuggee, we must have a free slot in all
// wait sets. Currently, we don't expose the size of those sets, but
// we know they're MAXIMUM_WAIT_OBJECTS. Note that we lose one slot
// to the control event.
if (cCurProcess >= MAXIMUM_WAIT_OBJECTS - 1)
{
ThrowHR(CORDBG_E_TOO_MANY_PROCESSES);
}
}
//---------------------------------------------------------------------------------------
//
// Add process to the list.
//
// Notes:
// AddProcess -- add a process object to this ICorDebug's hash of processes.
// This also tells this ICorDebug's runtime controller thread that the
// process set has changed so it can update its list of wait events.
//
void Cordb::AddProcess(CordbProcess* process)
{
// At this point, we should have already checked that we
// can have another debuggee.
STRESS_LOG1(LF_CORDB, LL_INFO10, "Cordb::AddProcess %08x...\n", process);
if ((m_managedCallback == NULL) || (m_managedCallback2 == NULL) || (m_managedCallback3 == NULL) || (m_managedCallback4 == NULL))
{
ThrowHR(E_FAIL);
}
RSLockHolder lockHolder(&m_processListMutex);
// Once we add another process, all outstanding process-enumerators become invalid.
m_pProcessEnumList.NeuterAndClear(NULL);
GetProcessList()->AddBaseOrThrow(process);
m_rcEventThread->ProcessStateChanged();
}
//
// RemoveProcess -- remove a process object from this ICorDebug's hash of
// processes. This also tells this ICorDebug's runtime controller thread
// that the process set has changed so it can update its list of wait events.
//
void Cordb::RemoveProcess(CordbProcess* process)
{
STRESS_LOG1(LF_CORDB, LL_INFO10, "Cordb::RemoveProcess %08x...\n", process);
LockProcessList();
GetProcessList()->RemoveBase((ULONG_PTR)process->m_id);
m_rcEventThread->ProcessStateChanged();
UnlockProcessList();
}
//
// LockProcessList -- Lock the process list.
//
void Cordb::LockProcessList(void)
{
m_processListMutex.Lock();
}
//
// UnlockProcessList -- Unlock the process list.
//
void Cordb::UnlockProcessList(void)
{
m_processListMutex.Unlock();
}
#ifdef _DEBUG
// Return true iff this thread owns the ProcessList lock
bool Cordb::ThreadHasProcessListLock()
{
return m_processListMutex.HasLock();
}
#endif
// Get the hash that has the process.
CordbSafeHashTable<CordbProcess> *Cordb::GetProcessList()
{
// If we're accessing the hash, we'd better be locked.
_ASSERTE(ThreadHasProcessListLock());
return &m_processes;
}
HRESULT Cordb::SendIPCEvent(CordbProcess * pProcess,
DebuggerIPCEvent * pEvent,
SIZE_T eventSize)
{
HRESULT hr = S_OK;
LOG((LF_CORDB, LL_EVERYTHING, "SendIPCEvent in Cordb called\n"));
EX_TRY
{
hr = m_rcEventThread->SendIPCEvent(pProcess, pEvent, eventSize);
}
EX_CATCH_HRESULT(hr)
return hr;
}
void Cordb::ProcessStateChanged(void)
{
m_rcEventThread->ProcessStateChanged();
}
HRESULT Cordb::WaitForIPCEventFromProcess(CordbProcess* process,
CordbAppDomain *pAppDomain,
DebuggerIPCEvent* event)
{
return m_rcEventThread->WaitForIPCEventFromProcess(process,
pAppDomain,
event);
}
HRESULT Cordb::SetTargetCLR(HMODULE hmodTargetCLR)
{
if (m_initialized)
return E_FAIL;
#ifdef FEATURE_CORESYSTEM
m_targetCLR = hmodTargetCLR;
#endif
// @REVIEW: are we happy with this workaround? It allows us to use the existing
// infrastructure for instance name decoration, but it really doesn't fit
// the same model because coreclr.dll isn't in this process and hmodTargetCLR
// is the debuggee target, not the coreclr.dll to bind utilcode to.
CoreClrCallbacks cccallbacks;
cccallbacks.m_hmodCoreCLR = hmodTargetCLR;
cccallbacks.m_pfnIEE = NULL;
cccallbacks.m_pfnGetCORSystemDirectory = NULL;
InitUtilcode(cccallbacks);
return S_OK;
}
//-----------------------------------------------------------
// ICorDebug
//-----------------------------------------------------------
// Set the handler for callbacks on managed events
// This can not be NULL.
// If we're debugging V2.0 apps, pCallback must implement ICDManagedCallback2
// @todo- what if somebody calls this after we've already initialized? (eg, changes
// the callback underneath us)
HRESULT Cordb::SetManagedHandler(ICorDebugManagedCallback *pCallback)
{
if (!m_initialized)
return E_FAIL;
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(pCallback, ICorDebugManagedCallback*);
m_managedCallback.Clear();
m_managedCallback2.Clear();
m_managedCallback3.Clear();
m_managedCallback4.Clear();
// For SxS, V2.0 debuggers must implement ManagedCallback2 to handle v2.0 debug events.
// For Single-CLR, a V1.0 debugger may actually get a V2.0 debuggee.
pCallback->QueryInterface(IID_ICorDebugManagedCallback2, (void **)&m_managedCallback2);
if (m_managedCallback2 == NULL)
{
if (GetDebuggerVersion() >= CorDebugVersion_2_0)
{
// This will leave our internal callbacks null, which future operations (Create/Attach) will
// use to know that we're not sufficiently initialized.
return E_NOINTERFACE;
}
else
{
// This should only be used in a single-CLR shimming scenario.
m_managedCallback2.Assign(new (nothrow) DefaultManagedCallback2(this));
if (m_managedCallback2 == NULL)
{
return E_OUTOFMEMORY;
}
}
}
pCallback->QueryInterface(IID_ICorDebugManagedCallback3, (void **)&m_managedCallback3);
if (m_managedCallback3 == NULL)
{
m_managedCallback3.Assign(new (nothrow) DefaultManagedCallback3(this));
}
if (m_managedCallback3 == NULL)
{
return E_OUTOFMEMORY;
}
pCallback->QueryInterface(IID_ICorDebugManagedCallback4, (void **)&m_managedCallback4);
if (m_managedCallback4 == NULL)
{
m_managedCallback4.Assign(new (nothrow) DefaultManagedCallback4(this));
}
if (m_managedCallback4 == NULL)
{
return E_OUTOFMEMORY;
}
m_managedCallback.Assign(pCallback);
return S_OK;
}
HRESULT Cordb::SetUnmanagedHandler(ICorDebugUnmanagedCallback *pCallback)
{
if (!m_initialized)
return E_FAIL;
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT_OR_NULL(pCallback, ICorDebugUnmanagedCallback*);
m_unmanagedCallback.Assign(pCallback);
return S_OK;
}
// CreateProcess() isn't supported on Windows CoreCLR.
// It is currently supported on Mac CoreCLR, but that may change.
bool Cordb::IsCreateProcessSupported()
{
#if !defined(FEATURE_DBGIPC_TRANSPORT_DI)
return false;
#else
return true;
#endif
}
// Given everything we know about our configuration, can we support interop-debugging
bool Cordb::IsInteropDebuggingSupported()
{
// We explicitly refrain from checking the unmanaged callback. See comment in
// ICorDebug::SetUnmanagedHandler for details.
#ifdef FEATURE_INTEROP_DEBUGGING
#if !defined(FEATURE_CORESYSTEM)
// Interop debugging is only supported internally on CoreCLR.
// Check if the special reg key is set. If not, then we don't allow interop debugging.
if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_DbgEnableMixedModeDebugging) == 0)
{
return false;
}
#endif // FEATURE_CORESYSTEM
return true;
#else
return false;
#endif
}
//---------------------------------------------------------------------------------------
//
// Implementation of ICorDebug::CreateProcess.
// Creates a process.
//
// Arguments:
// The following arguments are passed thru unmodified to the OS CreateProcess API and
// are defined by that API.
// lpApplicationName
// lpCommandLine
// lpProcessAttributes
// lpThreadAttributes
// bInheritHandles
// dwCreationFlags
// lpCurrentDirectory
// lpStartupInfo
// lpProcessInformation
// debuggingFlags
//
// ppProcess - Space to fill in for the resulting process, returned as a valid pointer
// on any success HRESULT.
//
// Return Value:
// Normal HRESULT semantics.
//
//---------------------------------------------------------------------------------------
HRESULT Cordb::CreateProcess(LPCWSTR lpApplicationName,
__in_z LPWSTR lpCommandLine,
LPSECURITY_ATTRIBUTES lpProcessAttributes,
LPSECURITY_ATTRIBUTES lpThreadAttributes,
BOOL bInheritHandles,
DWORD dwCreationFlags,
PVOID lpEnvironment,
LPCWSTR lpCurrentDirectory,
LPSTARTUPINFOW lpStartupInfo,
LPPROCESS_INFORMATION lpProcessInformation,
CorDebugCreateProcessFlags debuggingFlags,
ICorDebugProcess **ppProcess)
{
return CreateProcessCommon(NULL,
lpApplicationName,
lpCommandLine,
lpProcessAttributes,
lpThreadAttributes,
bInheritHandles,
dwCreationFlags,
lpEnvironment,
lpCurrentDirectory,
lpStartupInfo,
lpProcessInformation,
debuggingFlags,
ppProcess);
}
HRESULT Cordb::CreateProcessCommon(ICorDebugRemoteTarget * pRemoteTarget,
LPCWSTR lpApplicationName,
__in_z LPWSTR lpCommandLine,
LPSECURITY_ATTRIBUTES lpProcessAttributes,
LPSECURITY_ATTRIBUTES lpThreadAttributes,
BOOL bInheritHandles,
DWORD dwCreationFlags,
PVOID lpEnvironment,
LPCWSTR lpCurrentDirectory,
LPSTARTUPINFOW lpStartupInfo,
LPPROCESS_INFORMATION lpProcessInformation,
CorDebugCreateProcessFlags debuggingFlags,
ICorDebugProcess ** ppProcess)
{
// If you hit this assert, it means that you are attempting to create a process without specifying the version
// number.
_ASSERTE(CorDebugInvalidVersion != m_debuggerSpecifiedVersion);
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(ppProcess, ICorDebugProcess**);
HRESULT hr = S_OK;
EX_TRY
{
if (!m_initialized)
{
ThrowHR(E_FAIL);
}
// Check that we support the debugger version
CheckCompatibility();
#ifdef FEATURE_INTEROP_DEBUGGING
// DEBUG_PROCESS (=0x1) means debug this process & all future children.
// DEBUG_ONLY_THIS_PROCESS =(0x2) means just debug the immediate process.
// If we want to support DEBUG_PROCESS, then we need to have the RS sniff for new CREATE_PROCESS
// events and spawn new CordbProcess for them.
switch(dwCreationFlags & (DEBUG_PROCESS | DEBUG_ONLY_THIS_PROCESS))
{
// 1) managed-only debugging
case 0:
break;
// 2) failure - returns E_NOTIMPL. (as this would involve debugging all of our child processes).
case DEBUG_PROCESS:
ThrowHR(E_NOTIMPL);
// 3) Interop-debugging.
// Note that MSDN (at least as of Jan 2003) is wrong about this flag. MSDN claims
// DEBUG_ONLY_THIS_PROCESS w/o DEBUG_PROCESS should be ignored.
// But it really does launch as a debuggee (without auto-attaching to child processes).
case DEBUG_ONLY_THIS_PROCESS:
// Empirically, this is the common case for native / interop-debugging.
break;
// 4) Interop.
// The spec for ICorDebug::CreateProcess says this is the one to use for interop-debugging.
case DEBUG_PROCESS | DEBUG_ONLY_THIS_PROCESS:
// Win2k does not honor these flags properly: it treats (DEBUG_PROCESS | DEBUG_ONLY_THIS_PROCESS)
// as if it were DEBUG_PROCESS.
// We'll just always touch up the flags, even though WinXP and above is fine here.
// Per win2k issue, strip off DEBUG_PROCESS, so that we're just left w/ DEBUG_ONLY_THIS_PROCESS.
dwCreationFlags &= ~(DEBUG_PROCESS);
break;
default:
__assume(0);
}
#endif // FEATURE_INTEROP_DEBUGGING
// Must have a managed-callback by now.
if ((m_managedCallback == NULL) || (m_managedCallback2 == NULL) || (m_managedCallback3 == NULL) || (m_managedCallback4 == NULL))
{
ThrowHR(E_FAIL);
}
if (!IsCreateProcessSupported())
{
ThrowHR(E_NOTIMPL);
}
if (!IsInteropDebuggingSupported() &&
((dwCreationFlags & (DEBUG_PROCESS | DEBUG_ONLY_THIS_PROCESS)) != 0))
{
ThrowHR(CORDBG_E_INTEROP_NOT_SUPPORTED);
}
// Check that we can even accept another debuggee before trying anything.
EnsureAllowAnotherProcess();
} EX_CATCH_HRESULT(hr);
if (FAILED(hr))
{
return hr;
}
hr = ShimProcess::CreateProcess(this,
pRemoteTarget,
lpApplicationName,
lpCommandLine,
lpProcessAttributes,
lpThreadAttributes,
bInheritHandles,
dwCreationFlags,
lpEnvironment,
lpCurrentDirectory,
lpStartupInfo,
lpProcessInformation,
debuggingFlags
);
LOG((LF_CORDB, LL_EVERYTHING, "Handle in Cordb::CreateProcess is: %.I64x\n", lpProcessInformation->hProcess));
if (SUCCEEDED(hr))
{
LockProcessList();
CordbProcess * pProcess = GetProcessList()->GetBase(lpProcessInformation->dwProcessId);
UnlockProcessList();
PREFIX_ASSUME(pProcess != NULL);
pProcess->ExternalAddRef();
*ppProcess = (ICorDebugProcess *)pProcess;
}
return hr;
}
HRESULT Cordb::CreateProcessEx(ICorDebugRemoteTarget * pRemoteTarget,
LPCWSTR lpApplicationName,
__in_z LPWSTR lpCommandLine,
LPSECURITY_ATTRIBUTES lpProcessAttributes,
LPSECURITY_ATTRIBUTES lpThreadAttributes,
BOOL bInheritHandles,
DWORD dwCreationFlags,
PVOID lpEnvironment,
LPCWSTR lpCurrentDirectory,
LPSTARTUPINFOW lpStartupInfo,
LPPROCESS_INFORMATION lpProcessInformation,
CorDebugCreateProcessFlags debuggingFlags,
ICorDebugProcess ** ppProcess)
{
if (pRemoteTarget == NULL)
{
return E_INVALIDARG;
}
return CreateProcessCommon(pRemoteTarget,
lpApplicationName,
lpCommandLine,
lpProcessAttributes,
lpThreadAttributes,
bInheritHandles,
dwCreationFlags,
lpEnvironment,
lpCurrentDirectory,
lpStartupInfo,
lpProcessInformation,
debuggingFlags,
ppProcess);
}
//---------------------------------------------------------------------------------------
//
// Attaches to an existing process.
//
// Arguments:
// dwProcessID - The PID to attach to
// fWin32Attach - Flag to tell whether to attach as the Win32 debugger or not.
// ppProcess - Space to fill in for the resulting process, returned as a valid pointer
// on any success HRESULT.
//
// Return Value:
// Normal HRESULT semantics.
//
//---------------------------------------------------------------------------------------
HRESULT Cordb::DebugActiveProcess(DWORD dwProcessId,
BOOL fWin32Attach,
ICorDebugProcess **ppProcess)
{
return DebugActiveProcessCommon(NULL, dwProcessId, fWin32Attach, ppProcess);
}
HRESULT Cordb::DebugActiveProcessCommon(ICorDebugRemoteTarget * pRemoteTarget,
DWORD dwProcessId,
BOOL fWin32Attach,
ICorDebugProcess ** ppProcess)
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(ppProcess, ICorDebugProcess **);
HRESULT hr = S_OK;
EX_TRY
{
if (!m_initialized)
{
ThrowHR(E_FAIL);
}
// Must have a managed-callback by now.
if ((m_managedCallback == NULL) || (m_managedCallback2 == NULL) || (m_managedCallback3 == NULL) || (m_managedCallback4 == NULL))
{
ThrowHR(E_FAIL);
}
// Verify that the given process ID matches the process ID for which the object was created
if (m_pd.IsInitialized() && m_pd.m_Pid != dwProcessId)
{
ThrowHR(E_INVALIDARG);
}
// See the comment in Cordb::CreateProcess
_ASSERTE(CorDebugInvalidVersion != m_debuggerSpecifiedVersion);
// Check that we support the debugger version
CheckCompatibility();
// Check that we can even accept another debuggee before trying anything.
EnsureAllowAnotherProcess();
// Check if we're allowed to do interop.
bool fAllowInterop = IsInteropDebuggingSupported();
if (!fAllowInterop && fWin32Attach)
{
ThrowHR(CORDBG_E_INTEROP_NOT_SUPPORTED);
}
} EX_CATCH_HRESULT(hr)
if (FAILED(hr))
{
return hr;
}
hr = ShimProcess::DebugActiveProcess(
this,
pRemoteTarget,
&m_pd,
fWin32Attach == TRUE);
// If that worked, then there will be a process object...
if (SUCCEEDED(hr))
{
LockProcessList();
CordbProcess * pProcess = GetProcessList()->GetBase(dwProcessId);
if (pProcess != NULL)
{
// Add a reference now so process won't go away
pProcess->ExternalAddRef();
}
UnlockProcessList();
if (pProcess == NULL)
{
// This can happen if we add the process into the process hash in
// SendDebugActiveProcessEvent and then the process exits
// before we attempt to retrieve it again from GetBase.
//
*ppProcess = NULL;
return S_FALSE;
}
#if defined(FEATURE_DBGIPC_TRANSPORT_DI)
// This is where we queue the managed attach event in Whidbey. In the new architecture, the Windows
// pipeline gets a loader breakpoint when native attach is completed, and that's where we queue the
// managed attach event. See how we handle the loader breakpoint in code:ShimProcess::DefaultEventHandler.
// However, the Mac debugging transport gets no such breakpoint, and so we need to do this here.
//
// @dbgtodo Mac - Ideally we should hide this in our pipeline implementation, or at least move
// this to the shim.
_ASSERTE(!fWin32Attach);
{
pProcess->Lock();
hr = pProcess->QueueManagedAttach();
pProcess->Unlock();
}
#endif // FEATURE_DBGIPC_TRANSPORT_DI
*ppProcess = (ICorDebugProcess*) pProcess;
}
return hr;
}
// Make sure we want to support the debugger that's using us
void Cordb::CheckCompatibility()
{
// Get the debugger version specified by the startup APIs and convert it to a CLR major version number
CorDebugInterfaceVersion debuggerVersion = GetDebuggerVersion();
DWORD clrMajor;
if (debuggerVersion <= CorDebugVersion_1_0 || debuggerVersion == CorDebugVersion_1_1)
clrMajor = 1;
else if (debuggerVersion <= CorDebugVersion_2_0)
clrMajor = 2;
else if (debuggerVersion <= CorDebugVersion_4_0)
clrMajor = 4;
else
clrMajor = 5; // some unrecognized future version
if(!CordbProcess::IsCompatibleWith(clrMajor))
{
// Carefully choose our error-code to get an appropriate error-message from VS 2008
// If GetDebuggerVersion is >= 4, we could consider using the more-appropriate (but not
// added until V4) HRESULT CORDBG_E_UNSUPPORTED_FORWARD_COMPAT that is used by
// OpenVirtualProcess, but it's probably simpler to keep ICorDebug APIs returning
// consistent error codes.
ThrowHR(CORDBG_E_INCOMPATIBLE_PROTOCOL);
}
}
HRESULT Cordb::DebugActiveProcessEx(ICorDebugRemoteTarget * pRemoteTarget,
DWORD dwProcessId,
BOOL fWin32Attach,
ICorDebugProcess ** ppProcess)
{
if (pRemoteTarget == NULL)
{
return E_INVALIDARG;
}
return DebugActiveProcessCommon(pRemoteTarget, dwProcessId, fWin32Attach, ppProcess);
}
HRESULT Cordb::GetProcess(DWORD dwProcessId, ICorDebugProcess **ppProcess)
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(ppProcess, ICorDebugProcess**);
if (!m_initialized)
{
return E_FAIL;
}
LockProcessList();
CordbProcess *p = GetProcessList()->GetBase(dwProcessId);
UnlockProcessList();
if (p == NULL)
return E_INVALIDARG;
p->ExternalAddRef();
*ppProcess = static_cast<ICorDebugProcess*> (p);
return S_OK;
}
HRESULT Cordb::EnumerateProcesses(ICorDebugProcessEnum **ppProcesses)
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(ppProcesses, ICorDebugProcessEnum **);
HRESULT hr = S_OK;
EX_TRY
{
if (!m_initialized)
{
ThrowHR(E_FAIL);
}
// Locking here just means that the enumerator gets initialized against a consistent
// process-list. If we add/remove processes w/ an outstanding enumerator, things
// could still get out of sync.
RSLockHolder lockHolder(&this->m_processListMutex);
RSInitHolder<CordbHashTableEnum> pEnum;
CordbHashTableEnum::BuildOrThrow(
this,
&m_pProcessEnumList,
GetProcessList(),
IID_ICorDebugProcessEnum,
pEnum.GetAddr());
pEnum.TransferOwnershipExternal(ppProcesses);
}
EX_CATCH_HRESULT(hr);
return hr;
}
//
// Note: the following defs and structs are copied from various NT headers. I wasn't able to include those headers (like
// ntexapi.h) due to loads of redef problems and other conflicts with headers that we already pull in.
//
typedef LONG NTSTATUS;
#ifndef FEATURE_PAL
typedef BOOL (*NTQUERYSYSTEMINFORMATION)(SYSTEM_INFORMATION_CLASS SystemInformationClass,
PVOID SystemInformation,
ULONG SystemInformationLength,
PULONG ReturnLength);
#endif
// Implementation of ICorDebug::CanLaunchOrAttach
// @dbgtodo- this all goes away in V3.
// @dbgtodo- this should go away in Dev11.
HRESULT Cordb::CanLaunchOrAttach(DWORD dwProcessId, BOOL fWin32DebuggingEnabled)
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
HRESULT hr = S_OK;
EX_TRY
{
EnsureCanLaunchOrAttach(fWin32DebuggingEnabled);
}
EX_CATCH_HRESULT(hr);
return hr;
}
//---------------------------------------------------------------------------------------
//
// Throw an exception if we can't launch/attach.
//
// Arguments:
// fWin32DebuggingEnabled - true if interop-debugging, else false
//
// Return Value:
// None. If this returns, then it's safe to launch/attach.
// Else this throws an exception on failure.
//
// Assumptions:
//
// Notes:
// It should always be safe to launch/attach except in exceptional cases.
// @dbgtodo- this all goes away in V3.
// @dbgtodo- this should go away in Dev11.
//
void Cordb::EnsureCanLaunchOrAttach(BOOL fWin32DebuggingEnabled)
{
CONTRACTL
{
THROWS;
}
CONTRACTL_END;
if (!m_initialized)
{
ThrowHR(E_FAIL);
}
EnsureAllowAnotherProcess();
if (!IsInteropDebuggingSupported() && fWin32DebuggingEnabled)
{
ThrowHR(CORDBG_E_INTEROP_NOT_SUPPORTED);
}
// Made it this far, we succeeded.
}
HRESULT Cordb::CreateObjectV1(REFIID id, void **object)
{
return CreateObject(CorDebugVersion_1_0, ProcessDescriptor::UNINITIALIZED_PID, NULL, id, object);
}
#if defined(FEATURE_DBGIPC_TRANSPORT_DI)
// CoreCLR activates debugger objects via direct COM rather than the shim (just like V1). For now we share the
// same debug engine version as V2, though this may change in the future.
HRESULT Cordb::CreateObjectTelesto(REFIID id, void ** pObject)
{
return CreateObject(CorDebugVersion_2_0, ProcessDescriptor::UNINITIALIZED_PID, NULL, id, pObject);
}
#endif // FEATURE_DBGIPC_TRANSPORT_DI
// Static
// Used to create an instance for a ClassFactory (thus an external ref).
HRESULT Cordb::CreateObject(CorDebugInterfaceVersion iDebuggerVersion, DWORD pid, LPCWSTR lpApplicationGroupId, REFIID id, void **object)
{
if (id != IID_IUnknown && id != IID_ICorDebug)
return (E_NOINTERFACE);
LPSTR applicationGroupId = NULL;
if (lpApplicationGroupId != NULL)
{
// Get length of target string
int cbMultiByte = WideCharToMultiByte(CP_ACP, 0, lpApplicationGroupId, -1, NULL, 0, NULL, NULL);
if (cbMultiByte == 0)
{
return E_FAIL;
}
applicationGroupId = new (nothrow) CHAR[cbMultiByte];
if (applicationGroupId == NULL)
{
return (E_OUTOFMEMORY);
}
/* Convert to the ANSI code page (CP_ACP) */
cbMultiByte = WideCharToMultiByte(CP_ACP, 0, lpApplicationGroupId, -1, applicationGroupId, cbMultiByte, NULL, NULL);
if (cbMultiByte == 0)
{
delete [] applicationGroupId;
return E_FAIL;
}
}
ProcessDescriptor pd = ProcessDescriptor::Create(pid, applicationGroupId);
Cordb *db = new (nothrow) Cordb(iDebuggerVersion, pd);
if (db == NULL)
{
if (applicationGroupId != NULL)
delete [] applicationGroupId;
return (E_OUTOFMEMORY);
}
*object = static_cast<ICorDebug*> (db);
db->ExternalAddRef();
return (S_OK);
}
// This is the version of the ICorDebug APIs that the debugger believes it's consuming.
// If this is a different version than that of the debuggee, we have the option of shimming
// behavior.
CorDebugInterfaceVersion
Cordb::GetDebuggerVersion() const
{
return m_debuggerSpecifiedVersion;
}
//***********************************************************************
// ICorDebugTMEnum (Thread and Module enumerator)
//***********************************************************************
CordbEnumFilter::CordbEnumFilter(CordbBase * pOwnerObj, NeuterList * pOwnerList)
: CordbBase (pOwnerObj->GetProcess(), 0),
m_pOwnerObj(pOwnerObj),
m_pOwnerNeuterList(pOwnerList),
m_pFirst (NULL),
m_pCurrent (NULL),
m_iCount (0)
{
_ASSERTE(m_pOwnerNeuterList != NULL);
HRESULT hr = S_OK;
EX_TRY
{
m_pOwnerNeuterList->Add(pOwnerObj->GetProcess(), this);
}
EX_CATCH_HRESULT(hr);
SetUnrecoverableIfFailed(GetProcess(), hr);
}
CordbEnumFilter::CordbEnumFilter(CordbEnumFilter *src)
: CordbBase (src->GetProcess(), 0),
m_pOwnerObj(src->m_pOwnerObj),
m_pOwnerNeuterList(src->m_pOwnerNeuterList),
m_pFirst (NULL),
m_pCurrent (NULL)
{
_ASSERTE(m_pOwnerNeuterList != NULL);
HRESULT hr = S_OK;
EX_TRY
{
m_pOwnerNeuterList->Add(src->GetProcess(), this);
}
EX_CATCH_HRESULT(hr);
SetUnrecoverableIfFailed(GetProcess(), hr);
int iCountSanityCheck = 0;
EnumElement *pElementCur = NULL;
EnumElement *pElementNew = NULL;
EnumElement *pElementNewPrev = NULL;
m_iCount = src->m_iCount;
pElementCur = src->m_pFirst;
while (pElementCur != NULL)
{
pElementNew = new (nothrow) EnumElement;
if (pElementNew == NULL)
{
// Out of memory. Clean up and bail out.
goto Error;
}
if (pElementNewPrev == NULL)
{
m_pFirst = pElementNew;
}
else
{
pElementNewPrev->SetNext(pElementNew);
}
pElementNewPrev = pElementNew;
// Copy the element, including the AddRef part
pElementNew->SetData(pElementCur->GetData());
IUnknown *iu = (IUnknown *)pElementCur->GetData();
iu->AddRef();
if (pElementCur == src->m_pCurrent)
m_pCurrent = pElementNew;
pElementCur = pElementCur->GetNext();
iCountSanityCheck++;
}
_ASSERTE(iCountSanityCheck == m_iCount);
return;
Error:
// release all the allocated memory before returning
pElementCur = m_pFirst;
while (pElementCur != NULL)
{
pElementNewPrev = pElementCur;
pElementCur = pElementCur->GetNext();
((ICorDebugModule *)pElementNewPrev->GetData())->Release();
delete pElementNewPrev;
}
}
CordbEnumFilter::~CordbEnumFilter()
{
_ASSERTE(this->IsNeutered());
_ASSERTE(m_pFirst == NULL);
}
void CordbEnumFilter::Neuter()
{
EnumElement *pElement = m_pFirst;
EnumElement *pPrevious = NULL;
while (pElement != NULL)
{
pPrevious = pElement;
pElement = pElement->GetNext();
delete pPrevious;
}
// Null out the head in case we get neutered again.
m_pFirst = NULL;
m_pCurrent = NULL;
CordbBase::Neuter();
}
HRESULT CordbEnumFilter::QueryInterface(REFIID id, void **ppInterface)
{
// if we QI with the IID of the base type, we can't just return a pointer ICorDebugEnum directly, because
// the cast is ambiguous. This happens because CordbEnumFilter implements both ICorDebugModuleEnum and
// ICorDebugThreadEnum, both of which derive in turn from ICorDebugEnum. This produces a diamond inheritance
// graph. Thus we need a double cast. It doesn't really matter whether we pick ICorDebugThreadEnum or
// ICorDebugModuleEnum, because it will be backed by the same object regardless.
if (id == IID_ICorDebugEnum)
*ppInterface = static_cast<ICorDebugEnum *>(static_cast<ICorDebugThreadEnum *>(this));
else if (id == IID_ICorDebugModuleEnum)
*ppInterface = (ICorDebugModuleEnum*)this;
else if (id == IID_ICorDebugThreadEnum)
*ppInterface = (ICorDebugThreadEnum*)this;
else if (id == IID_IUnknown)
*ppInterface = this;
else
{
*ppInterface = NULL;
return E_NOINTERFACE;
}
ExternalAddRef();
return S_OK;
}
HRESULT CordbEnumFilter::Skip(ULONG celt)
{
HRESULT hr = S_OK;
PUBLIC_API_BEGIN(this);
{
while ((celt-- > 0) && (m_pCurrent != NULL))
{
m_pCurrent = m_pCurrent->GetNext();
}
}
PUBLIC_API_END(hr);
return hr;
}
HRESULT CordbEnumFilter::Reset()
{
HRESULT hr = S_OK;
PUBLIC_API_BEGIN(this);
{
m_pCurrent = m_pFirst;
}
PUBLIC_API_END(hr);
return hr;
}
HRESULT CordbEnumFilter::Clone(ICorDebugEnum **ppEnum)
{
HRESULT hr = S_OK;
PUBLIC_API_BEGIN(this);
{
ValidateOrThrow(ppEnum);
CordbEnumFilter * pClone = new CordbEnumFilter(this);
// Ambiguous conversion from CordbEnumFilter to ICorDebugEnum, so
// we explicitly convert it through ICorDebugThreadEnum.
pClone->ExternalAddRef();
(*ppEnum) = static_cast<ICorDebugThreadEnum *> (pClone);
}
PUBLIC_API_END(hr);
return hr;
}
HRESULT CordbEnumFilter::GetCount(ULONG *pcelt)
{
HRESULT hr = S_OK;
PUBLIC_API_BEGIN(this);
{
ValidateOrThrow(pcelt);
*pcelt = (ULONG)m_iCount;
}
PUBLIC_API_END(hr);
return hr;
}
HRESULT CordbEnumFilter::Next(ULONG celt,
ICorDebugModule *objects[],
ULONG *pceltFetched)
{
HRESULT hr = S_OK;
PUBLIC_API_BEGIN(this);
{
hr = NextWorker(celt, objects, pceltFetched);
}
PUBLIC_API_END(hr);
return hr;
}
HRESULT CordbEnumFilter::NextWorker(ULONG celt, ICorDebugModule *objects[], ULONG *pceltFetched)
{
// <TODO>
//
// nickbe 11/20/2002 10:43:39
// This function allows you to enumerate threads that "belong" to a
// particular AppDomain. While this operation makes some sense, it makes
// very little sense to
// (a) enumerate the list of threads in the entire process
// (b) build up a hand-rolled singly linked list (grrr)
// </TODO>
VALIDATE_POINTER_TO_OBJECT_ARRAY(objects, ICorDebugModule *,
celt, true, true);
VALIDATE_POINTER_TO_OBJECT_OR_NULL(pceltFetched, ULONG *);
if ((pceltFetched == NULL) && (celt != 1))
{
return E_INVALIDARG;
}
if (celt == 0)
{
if (pceltFetched != NULL)
{
*pceltFetched = 0;
}
return S_OK;
}
HRESULT hr = S_OK;
ULONG count = 0;
while ((m_pCurrent != NULL) && (count < celt))
{
objects[count] = (ICorDebugModule *)m_pCurrent->GetData();
m_pCurrent = m_pCurrent->GetNext();
count++;
}
if (pceltFetched != NULL)
{
*pceltFetched = count;
}
//
// If we reached the end of the enumeration, but not the end
// of the number of requested items, we return S_FALSE.
//
if (count < celt)
{
return S_FALSE;
}
return hr;
}
HRESULT CordbEnumFilter::Next(ULONG celt,
ICorDebugThread *objects[],
ULONG *pceltFetched)
{
HRESULT hr = S_OK;
PUBLIC_API_BEGIN(this);
{
hr = NextWorker(celt, objects, pceltFetched);
}
PUBLIC_API_END(hr);
return hr;
}
HRESULT CordbEnumFilter::NextWorker(ULONG celt, ICorDebugThread *objects[], ULONG *pceltFetched)
{
// @TODO remove this class
VALIDATE_POINTER_TO_OBJECT_ARRAY(objects, ICorDebugThread *, celt, true, true);
VALIDATE_POINTER_TO_OBJECT_OR_NULL(pceltFetched, ULONG *);
if ((pceltFetched == NULL) && (celt != 1))
{
return E_INVALIDARG;
}
if (celt == 0)
{
if (pceltFetched != NULL)
{
*pceltFetched = 0;
}
return S_OK;
}
HRESULT hr = S_OK;
ULONG count = 0;
while ((m_pCurrent != NULL) && (count < celt))
{
objects[count] = (ICorDebugThread *)m_pCurrent->GetData();
m_pCurrent = m_pCurrent->GetNext();
count++;
}
if (pceltFetched != NULL)
{
*pceltFetched = count;
}
//
// If we reached the end of the enumeration, but not the end
// of the number of requested items, we return S_FALSE.
//
if (count < celt)
{
return S_FALSE;
}
return hr;
}
HRESULT CordbEnumFilter::Init (ICorDebugModuleEnum * pModEnum, CordbAssembly *pAssembly)
{
INTERNAL_API_ENTRY(GetProcess());
ICorDebugModule *pCorModule = NULL;
CordbModule *pModule = NULL;
ULONG ulDummy = 0;
HRESULT hr = pModEnum->Next(1, &pCorModule, &ulDummy);
//
// Next returns E_FAIL if there is no next item, along with
// the count being 0. Convert that to just being S_OK.
//
if ((hr == E_FAIL) && (ulDummy == 0))
{
hr = S_OK;
}
if (FAILED (hr))
return hr;
EnumElement *pPrevious = NULL;
EnumElement *pElement = NULL;
while (ulDummy != 0)
{
pModule = (CordbModule *)(ICorDebugModule *)pCorModule;
// Is this module part of the assembly for which we're enumerating?
if (pModule->m_pAssembly == pAssembly)
{
pElement = new (nothrow) EnumElement;
if (pElement == NULL)
{
// Out of memory. Clean up and bail out.
hr = E_OUTOFMEMORY;
goto Error;
}
pElement->SetData ((void *)pCorModule);
m_iCount++;
if (m_pFirst == NULL)
{
m_pFirst = pElement;
}
else
{
PREFIX_ASSUME(pPrevious != NULL);
pPrevious->SetNext (pElement);
}
pPrevious = pElement;
}
else
((ICorDebugModule *)pModule)->Release();
hr = pModEnum->Next(1, &pCorModule, &ulDummy);
//
// Next returns E_FAIL if there is no next item, along with
// the count being 0. Convert that to just being S_OK.
//
if ((hr == E_FAIL) && (ulDummy == 0))
{
hr = S_OK;
}
if (FAILED (hr))
goto Error;
}
m_pCurrent = m_pFirst;
return S_OK;
Error:
// release all the allocated memory before returning
pElement = m_pFirst;
while (pElement != NULL)
{
pPrevious = pElement;
pElement = pElement->GetNext();
((ICorDebugModule *)pPrevious->GetData())->Release();
delete pPrevious;
}
return hr;
}
HRESULT CordbEnumFilter::Init (ICorDebugThreadEnum *pThreadEnum, CordbAppDomain *pAppDomain)
{
INTERNAL_API_ENTRY(GetProcess());
ICorDebugThread *pCorThread = NULL;
CordbThread *pThread = NULL;
ULONG ulDummy = 0;
HRESULT hr = pThreadEnum->Next(1, &pCorThread, &ulDummy);
//
// Next returns E_FAIL if there is no next item, but we want to consider this
// ok in this context.
//
if ((hr == E_FAIL) && (ulDummy == 0))
{
hr = S_OK;
}
if (FAILED(hr))
{
return hr;
}
EnumElement *pPrevious = NULL;
EnumElement *pElement = NULL;
while (ulDummy > 0)
{
pThread = (CordbThread *)(ICorDebugThread *) pCorThread;
// Is this thread part of the appdomain for which we're enumerating?
// Note that this is rather inefficient (we call into the left side for every thread),
// but the whole idea of enumerating the threads of an AppDomain is pretty bad,
// and we don't expect this to be used much if at all.
CordbAppDomain* pThreadDomain;
hr = pThread->GetCurrentAppDomain( &pThreadDomain );
if( FAILED(hr) )
{
goto Error;
}
if (pThreadDomain == pAppDomain)
{
pElement = new (nothrow) EnumElement;
if (pElement == NULL)
{
// Out of memory. Clean up and bail out.
hr = E_OUTOFMEMORY;
goto Error;
}
pElement->SetData ((void *)pCorThread);
m_iCount++;
if (m_pFirst == NULL)
{
m_pFirst = pElement;
}
else
{
PREFIX_ASSUME(pPrevious != NULL);
pPrevious->SetNext (pElement);
}
pPrevious = pElement;
}
else
{
((ICorDebugThread *)pThread)->Release();
}
// get the next thread in the thread list
hr = pThreadEnum->Next(1, &pCorThread, &ulDummy);
//
// Next returns E_FAIL if there is no next item, along with
// the count being 0. Convert that to just being S_OK.
//
if ((hr == E_FAIL) && (ulDummy == 0))
{
hr = S_OK;
}
if (FAILED (hr))
goto Error;
}
m_pCurrent = m_pFirst;
return S_OK;
Error:
// release all the allocated memory before returning
pElement = m_pFirst;
while (pElement != NULL)
{
pPrevious = pElement;
pElement = pElement->GetNext();
((ICorDebugThread *)pPrevious->GetData())->Release();
delete pPrevious;
}
return hr;
}
|
def _remove_all_networks(hil_client, node):
    """Revert every switch port attached to the node's NICs, detaching the node from all networks."""
    node_info = show_node(hil_client, node)
    for nic in node_info['nics']:
        port = nic['port']
        switch = nic['switch']
        if port and switch:
            try:
                hil_client.port.port_revert(switch, port)
                log_info('Removed all networks from node `%s`' % node)
            # Catch both exception types; the old `except A, B:` form only caught
            # FailedAPICallException and bound it to the name ConnectionError.
            except (FailedAPICallException, ConnectionError):
                log_error('Failed to revert port `%s` on node `%s` switch `%s`' % (port, node, switch))
                raise HILClientFailure()
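# --- Illustrative usage sketch (not part of the original module) ---
# _remove_all_networks() is the cleanup step that detaches a node from every
# network before the node is handed back. The sketch below shows one plausible
# call site; `hil_client_connect` and `free_nodes` are hypothetical names used
# only for illustration and are not confirmed APIs of this project.
#
# def free_nodes(endpoint, username, password, nodes):
#     hil_client = hil_client_connect(endpoint, username, password)
#     for node in nodes:
#         # _remove_all_networks raises HILClientFailure on error, so a failure
#         # on one node aborts the whole batch.
#         _remove_all_networks(hil_client, node)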
|
#include <stdio.h>

/*
 * The four inputs are the three pairwise sums x+y, x+z, y+z and the total
 * x+y+z of three positive numbers, given in arbitrary order. The total is
 * the largest of the four, so it is moved into d while a, b, c keep the
 * pairwise sums. If a = x+y, b = x+z and c = y+z, then a + b - c = 2x, so
 * x = (a + b - c) / 2, y = a - x, z = b - x.
 * Example: input "3 5 4 6" leaves pairwise sums 3 5 4 and total 6,
 * giving the output "2 1 3".
 */
int main(void)
{
    long long a, b, c, d, a1, b1, c1;
    scanf("%lld%lld%lld%lld", &a, &b, &c, &d);

    /* Move the maximum (the total) out of a, b, c. The else-if chain ensures
     * exactly one value is replaced even if some inputs are equal. */
    if (a >= b && a >= c && a >= d)
        a = d;
    else if (b >= a && b >= c && b >= d)
        b = d;
    else if (c >= a && c >= b && c >= d)
        c = d;
    /* otherwise d is already the total and a, b, c are the pairwise sums */

    a1 = (a + b - c) / 2;
    b1 = a - a1;
    c1 = b - a1;
    printf("%lld %lld %lld\n", a1, b1, c1);
    return 0;
}
|
English Language and Literature Students’ Perceptions of Critical Thinking
The fundamental purpose of the function of thinking is to attribute meaning to events in our lives, classify these events into categories and identify them subjectively. As an important dimension of education on thinking, the teaching of critical thinking has, since the 1950s, attained a wide conceptual framework. Accepted as a factor that facilitates the acquisition and comprehension of information, critical thinking is a desirable learning outcome of education. Some of the characteristics and properties of critical thinking are being aware, systematic, flexible and patient, encompassing open-meaning, metacognition, reconstruction, motivation and discussion. From this standpoint, the perceptions of the students of the Karabuk University English Language and Literature Department regarding critical thinking were investigated in detail in the Spring semester of the academic year 2017-2018. This study is a descriptive one and utilises the survey model to determine the levels of critical thinking in 222 teacher candidates, comprising students ranging from freshmen to seniors and from both daytime and evening education. The findings have been evaluated and a set of conclusions has been drawn. When the ANOVA results were analysed, no statistically significant difference according to class level was found on the sub-dimensions of metacognition, flexibility, systematicity and open-mindedness, but a difference was found between the sophomore and senior students on the sub-dimension of perseverance-patience.
Introduction
Educational activities in every field prove more and more difficult due to changing human needs (such as employment, career and prestige), and after each new piece of knowledge or skill is acquired, the need to learn another arises. The importance of educational institutions these days has also started to increase progressively due to the persistence of lifelong education. Individuals are inherently thinking beings, yet this does not necessarily make them good thinkers. The difference between thinking and thinking well is similar to the difference between walking and dancing: walking is learned naturally, whereas dancing does not arise automatically and requires a certain effort (Gelder, 2005; Ritchart & Perkins, 2005).
The skill of thinking is a natural function found in every individual. Presseisen (1985) approaches thinking skills in a progressive manner as "basic processes, problem solving, decision making and creative thinking". That said, thinking itself can sometimes be deficient, prejudiced and insufficient. Fisher (2001), Halpern (2003) and Haskins (2006) suggest that if students are taught how to think, their thinking skills can improve. Thinking well depends upon properties such as being conscious and systematic, having flexibility and patience, and incorporating open-meaning, metacognition, reconstruction, motivation and discussion (Lee, 1989; Kazancı, 1989). These properties emphasise that the act of thinking should free itself from prejudice and proceed systematically; they also describe how to think in a critical manner. There is, moreover, a strong relationship between the development of deep language skills for the effective use of English as a foreign language and the parallel development of an equally strong critical thinking ability.
Along with initiating the development of critical skills among both teachers and students, schools should also undertake the role of encouraging the development of critical thinking skills (Malamitsa et al., 2008). When students gain skills in critical thinking, not only will they have control over when and what they think, but also over how they think (Osborne et al., 2009). Therefore, thinking skills are improved through asking questions, collecting data and drawing conclusions from the related data (Elder and Paul, 2003). In line with this, critical thinking is not only high-level thinking but also a way of seeking knowledge about traditional, social, historical and political roots, as well as of directing learning towards society (Benesch, 1993). It needs to be stated that the success of the student does not merely stem from having knowledge of so-called skills such as asking questions, collecting data and critical thinking abilities. Thinking well is based on habits as well as knowledge and attitude. It is necessary to engage in practice in order to learn how to think critically, just as it is in learning a difficult sport. A person who has a great intellect or a very good level of knowledge will not become a good critical thinker merely by virtue of these qualities if he/she does not practice (Allen, 2004; Haskins, 2006; Ritchart and Perkins, 2005). Grant (1988; as cited in Özdemir, 2005) propounds that critical thinking can generally be considered as processes or manners of thinking such as reasoning, high-level thinking, acting wisely and thinking creatively. Beyer (1985) perceives critical thinking as a skill of collecting information, evaluating it and using it effectively. Cüceloğlu (1995) defines critical thinking as "an active and organized mental process in which the aim is to be able to understand ourselves and phenomena around us, by being aware of our own thinking processes and taking others' opinions into account". Semerci (2016) describes critical thinking as "a stage involving the examination and evaluation of prejudices, assumptions and every kind of information presented; the discussion of different aspects and conclusions through which a decision is aimed to be made and being able to evaluate how viable the justifications are for the decision". What is obtained from these assumptions, justifications and inferences is put forth at the stage of adequacy. At the stage of establishing communication, the individual communicates what he/she understands comprehensibly in order to create unanimity and to reach a joint decision (Friedel et al., 2008). Critical thinking comprises a series of conceptual tools and strategies related to intellectual skills (Rudinow and Barry, 2007) and focused on deciding what to do or what to believe (Ennis, 1986). According to Ennis (1996) and Lipman (2003; as cited in Topoğlu and Ünal, 2013), decision-making is the primary factor of critical thinking. Reflectiveness and rationality are factors found in prevalent definitions of critical thinking. Similarly, McPeck (1981) has explicated critical thinking as "the education and skill of attempting activity with a reflectivist scepticism".
Critical Thinking: Qualities
Facione is another researcher who has put forth that the individual who has acquired these skills thinks critically and draws attention through utilising them effectively. According to him, critical thinking skills can be expressed as "interpretation, analysis, evaluation, inference, explanation and self-regulation" (Facione, 1990; Türnüklü-Yeşildere, 2005): i. Analysis: Defining the relations between the states, questions, concepts, definitions and other factors comprising beliefs, judgements, experiences, knowledge or thoughts.
ii. Interpretation: Expressing the meaning or significance of a state, experience, datum, occurrence, judgement, belief, rule or criterion.
iii. Evaluation: A person's assessment of the states that define his/her perception, experience, belief, judgement or opinion in terms of credibility and reliability.
iv. Inference: Being able to reach rational conclusions, developing healthy projections and hypotheses; taking into account the relevant information in presentations such as data, judgement, state, principle, evidence, belief, opinion, definition, concept and question.
v. Explanation: Putting forth the conclusions drawn from reasoning, judging this reasoning with proof and criteria in conceptual, methodological, contextual dimensions and expressing it with convincing evidence.
Facione (2007) also denotes the criteria of an ideal critical thinker, according to the Delphi Report consensus, as inquisitive, well-informed, trustful of reason, open-minded, flexible, fair-minded in evaluation, willing to reconsider, and diligent in seeking relevant information. Critical thinking is also "awareness of one's own thinking and reflection on the thinking of the self and others as an object of cognition" (Kuhn & Dean, 2004; cited in Semerci & Elaldı, 2014). Therefore, critical thinking is related to the development of metacognitive understanding, which is essential for reaching higher levels of cognition (Lockwood, 2003). The aforementioned properties express individuals' knowledge of themselves and their talents, as well as their ability to receive opinions from others without prejudice and to revise and change their own opinions when necessary. Besides, they emphasise affective attributes such as curiosity, flexibility, self-confidence and impartiality along with intellectual skills. Another compulsory condition of being a good critical thinker is practice. The necessity of using intellectual skills in conjunction with affective ones shows how the teacher's role and the environment to be created in teaching critical thinking differ from those of a conventional classroom. First and foremost, the environment created should be secure enough to allow the students to express themselves comfortably. Strategies such as asking questions, collaborative working, utilising knowledge, creating sensitivity and adopting a versatile perspective (Berman, 1991; Haskins, 2006) should be taken into consideration. However, the situations that hinder the establishment of such an environment are the inability to think independently, over-confidence in logic, hardheadedness and stubbornness, complacency, perfectionism, the exhibition of dogmatic behaviour, acting impulsively, over-confidence in relation to the teacher and lack of self-confidence (Clark & Starr, 1991; Fisher, 2001; Halpern, 2003; Haskins, 2006). Teachers who aim to develop critical thinking in their students should remove the obstacles arising from these different factors and improve their students' thinking. In order to do this, an assessment should be made and the students' disposition towards critical thinking should be measured. In this study, the use of a scale to measure this disposition towards critical thinking was planned.
Method
This is a descriptive study, orientated towards analysing the critical thinking disposition of teacher candidates. The survey method has been used with the aim of determining the present levels of the critical thinking disposition of teacher candidates receiving education in the Karabük University English Language and Literature Department. With this model, events and objects are described in their present situation (Sönmez & Alacapınar, 2011; Karasar, 1995). In the study, the "Critical Thinking Disposition (CTD)" scale, which consists of forty-nine items, put forth by Semerci (2000) and further developed by Semerci (2016), has been utilised. The validity and reliability of the CTD scale have been ensured and its confirmatory factor analysis has been performed with the AMOS software (Chi-square=2778.981, Sd=1073, χ²/Sd=2.590, GFI=0.903, CFI=0.932, RMSEA=0.038); on the basis of these results, Semerci (2016) states that the scale is applicable to teachers and teacher candidates.
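As an added arithmetic note (not part of the original analysis; the cutoffs mentioned here are common rules of thumb rather than claims of this study), the reported fit values can be checked directly:

χ²/Sd = 2778.981 / 1073 ≈ 2.590 < 3 and RMSEA = 0.038 < 0.05,

ratios conventionally read as indicating an acceptable model fit.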
Population and Study Sample
The study population consists of the 500 students studying at the Karabük University Faculty of Literature in the English Language and Literature Department during the Spring semester of 2017-2018, 222 of whom made up the sample. The sample was selected randomly; in this type of research, the sample is randomly drawn from a pre-determined list. The point to consider here is that the elements in the list share similar features, such as age and level (Çepni, 2007). The forty-nine-item survey on critical thinking was applied to the students receiving education in the English Language and Literature Department at Karabük University, and the results were evaluated.
The survey was applied to 222 students, 149 (67.1%) of whom were female and 73 (32.9%) of whom were male (Table 1).

Table 1. Gender distribution of participants
Female: 149 (67.1%)
Male: 73 (32.9%)

In addition to this, 112 (50.5%) of the students attended daytime education while 110 (49.5%) attended evening education. The university entrance exam scores of the students receiving evening education are slightly lower than those of the students receiving daytime education.
Results
The t-test, which compares two means and highlights the differences between them, was applied in the analyses. When the mean and standard deviation results of the sub-dimensions of critical thinking according to classes were analysed, it was seen that students concentrated on the "Mostly Agree (4)" option on the sub-dimensions of metacognition, flexibility, systematicity, perseverance-patience and open-mindedness. The highest mean was seen in open-mindedness among senior-year students, while the lowest mean was seen in perseverance-patience among junior-year students. While no statistical difference according to class was found when the Anova results for the sub-dimensions of metacognition, flexibility, systematicity and open-mindedness were analysed (P>.05), a difference was found between the sophomore and senior students on the sub-dimension of perseverance-patience (F=2.103, P>0.05).
Conclusion and Discussion
Critical thinking is a desired objective of current educational programmes. However, only individuals who possess the necessary skills and dispositions can utilise critical thinking effectively in their lives. A total of 222 students receiving education in the English Language and Literature Department at Karabük University participated in this study, which was orientated towards measuring their perceptions of the sub-dimensions of critical thinking.
The forty-nine-item scale developed by Semerci (2016), the reliability of which is quite high, was applied in order to ascertain the students' perceptions of critical thinking, and the data were analysed. Accordingly, when the test results regarding critical thinking were examined according to the variable of gender in order to understand the importance of the gender factor, it was seen that there is no statistical difference according to gender on the sub-dimensions of metacognition, flexibility, systematicity, perseverance-patience and open-mindedness.
When the test results regarding critical thinking were analysed according to group (daytime education, evening education), it was seen that there is no statistical difference between the groups on the sub-dimensions of metacognition, flexibility, systematicity, perseverance-patience and open-mindedness.
When the mean and standard deviation results of the sub-dimensions of critical thinking according to classes were analysed, students focused on the "Mostly Agree (4)" option on the sub-dimensions of metacognition, flexibility, systematicity, perseverance and patience, and open-mindedness. The highest mean was seen in open-mindedness in senior-year students while the lowest mean was seen in perseverance-patience in junior-year students. While a statistical difference according to classes was not found when Anova results were analysed on the sub-dimensions of metacognition, flexibility, systematicity, open-mindedness, a difference was found between the sophomore and senior students on the sub-dimension of perseverance-patience.
It is seen that senior-year students have more perseverance and patience in comparison to sophomore-year students in the sub-dimension of perseverance-patience. That is to say, as the class level gets higher, the students gain more perseverance and patience in general. This result is in line with the ideas by Lee (1989) and Kazancı (1989) which support the theory that the way to think depends upon properties such as being conscious and systematic, flexible, patient, open-meaning and receptive to metacognition, reconstruction, motivation and discussion.
In conclusion, it can be said that critical thinking is an important cognitive skill which affects students' viewpoints on events, their perceptions concerning what transpires in their environment and their decision-making processes. The instillment of critical thinking skills and their application should be among the objectives of contemporary education programmes and should occupy a fundamental position in the learning of thinking skills. In addition to this, the responsibility for creating more effective critical thinking environments in classrooms falls, to a great extent, on the shoulders of teachers and instructors.
|
# Grid pursuit simulation (Codeforces-style "statues" task): Maria starts at the
# bottom-left corner and Anna sits at the top-right corner, while the statues
# ('S') fall one row per turn. Each turn Maria moves to one of the 8 neighbours
# or stays, then the statues descend; she may not step onto a statue and loses
# if one lands on her. Surviving until every statue has left the board is
# equivalent to winning, so the search only checks survival.
start = []
for i in range(8):
    start.append(input())
a = [start]
# Sample board for reference:
# start = [".......A", "........", "........", "........", "........",
#          ".SSSSSSS", "S.......", "M......."]

# Precompute the board after each of the next 10 turns: shifting all rows down
# by one (new empty top row, bottom row dropped) is equivalent to every statue
# falling one square. The 'A' carried down from the old top row is erased.
for i in range(10):
    tmp = [".......A"] + a[-1]
    tmp[1] = tmp[1][:-1] + "."
    a.append(tmp[:-1])

# Move offsets: the 4 orthogonal directions, staying in place, and the 4 diagonals.
dx = [-1, 1, 0, 0, 0, 1, 1, -1, -1]
dy = [0, 0, -1, 1, 0, -1, 1, -1, 1]

def chk(x, y, step):
    """Return True if Maria can survive starting from (x, y) on board `step`."""
    if a[step][y][x] == "S":  # a statue has just fallen onto Maria
        return False
    if step == 9:  # all statues have fallen off the board by this point
        return True
    for i in range(9):
        x_, y_ = x + dx[i], y + dy[i]
        if min(x_, y_) < 0 or max(x_, y_) > 7:
            continue
        # She may not step onto a statue, and must survive the following turns.
        if a[step][y_][x_] != 'S' and chk(x_, y_, step + 1):
            return True
    return False

print("WIN" if chk(0, 7, 0) else "LOSE")
|
// Repository: scjalliance/drivestream
package main
import (
"context"
"fmt"
"github.com/scjalliance/drivestream/driveapicollector"
"github.com/scjalliance/drivestream/resource"
drive "google.golang.org/api/drive/v3"
)
// listPermissions retrieves every permission entry for the drive or file with
// the given ID, following nextPageToken pagination until the list is exhausted.
func listPermissions(ctx context.Context, s *drive.Service, id string) (perms []resource.Permission, err error) {
var token string
for {
call := s.Permissions.List(id)
call.Context(ctx)
call.SupportsTeamDrives(true)
call.UseDomainAdminAccess(true)
call.Fields("nextPageToken", "permissions(id,type,emailAddress,domain,role,displayName,expirationTime,deleted)")
if token != "" {
call.PageToken(token)
}
list, err := call.Do()
if err != nil {
return nil, fmt.Errorf("unable to retrieve teamdrive permissions: %v", err)
}
for i, perm := range list.Permissions {
record, err := driveapicollector.MarshalPermission(perm)
if err != nil {
return nil, fmt.Errorf("permission list parsing failed: record %d: %v", i, err)
}
perms = append(perms, record)
}
if list.NextPageToken == "" {
return perms, nil
}
token = list.NextPageToken
}
}
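// examplePrintPermissions is an illustrative sketch (not part of the original
// file): given an authenticated *drive.Service, it shows how listPermissions
// might be called for a single drive ID and simply dumps each collected record.
func examplePrintPermissions(ctx context.Context, s *drive.Service, driveID string) error {
	perms, err := listPermissions(ctx, s, driveID)
	if err != nil {
		return err
	}
	for i, p := range perms {
		fmt.Printf("permission %d: %+v\n", i, p)
	}
	return nil
}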
|
Tens of thousands of Taiwanese protesters have taken to the streets in Taipei in a bid to pressure President Ma Ying-jeou to retract a controversial trade pact with China.
The protesters, many wearing black shirts and headbands reading "defend democracy, retract service trade pact," crowded the streets leading to the presidential office, despite fresh concessions offered by Ma to further scrutinise agreements with China.
"The people cannot accept the government's reckless decision to sign the service trade pact with China. The government doesn't listen to the people who demand the pact be retracted," said protester Ko Hsuan-yu, a 25-year-old graduate student.
Security was tight as 3,500 police were deployed in the official presidential neighbourhood.
The heavy security presence comes after violent clashes on March 23, when a group of protesters stormed government headquarters. More than 100 people were injured when baton-wielding riot police used water cannon to disperse demonstrators and retake the government building in that incident.
Crowd estimated at 87,000
Organisers aimed to attract more than 100,000 people to the Sunday protest and vowed to hold a peaceful demonstration. An initial police estimate of the crowd was around 87,000.
"We will not react to any violent actions from the police or other groups. We will maintain peace ... any form of violence is totally prohibited in this movement," said student leader Lin Fei-fan.
Around 200 student-led demonstrators have occupied parliament's main chamber for nearly two weeks in a bid to stop the trade pact with China.
Ma, who has sought closer ties with China since becoming the island's leader in 2008, on Saturday said he would agree to demands that a law be introduced to monitor all agreements with China.
But he rejected the protesters' main demand - that the service trade agreement signed with China in June last year be retracted, saying it could "cause too much damage to Taiwan".
The pact is designed to open up further trade in services between China and Taiwan, which split 65 years ago after a civil war.
But the protesters say the deal will damage Taiwan's economy and leave it vulnerable to political pressure from China.
The deal is a follow-up agreement to a sweeping Economic Cooperation Framework Agreement signed in 2010 to reduce trade barriers between China and Taiwan.
Ma, whose approval ratings sit at just 10 percent, has overseen a marked thaw in relations with Beijing since he came to power pledging to strengthen trade and tourism links. He was re-elected in January 2012.
China, however, still considers Taiwan as part of its territory awaiting reunification - by force if necessary.
|
Small-Field Approach Holds Promise for Operators in Southeast Asia
This article, written by Special Publications Editor Adam Wilson, contains highlights of paper SPE 186434, “Small Is Beautiful: Why Small Fields Are the Next Big Thing in Southeast Asia and How to Capitalize on Them,” by Arnold Volkenborn, Andrew Lea-Cox, SPE, and Bo Sun, Accenture Strategy Energy, prepared for the 2017 SPE/IATMI Asia Pacific Oil and Gas Conference and Exhibition, Bali, Indonesia, 17–19 October. The paper has not been peer reviewed.
This paper shows how a new approach to small fields could unlock more than twice the net present value (NPV) of larger conventional fields in Southeast Asia at a similar level of capital expenditure (CAPEX). Capturing this value, though, will require a fundamental change in the operating models of operators in the region. The small-field approach focuses on margin maximization instead of recovery and productivity maximization.
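To make the margin-versus-volume argument concrete, the sketch below compares the net present value of two purely hypothetical cash-flow profiles — a short-cycle small field and a slower, longer-cycle conventional development — at the same total capital outlay; the dollar figures and the 10% discount rate are illustrative assumptions, not numbers taken from the paper.

package main

import (
	"fmt"
	"math"
)

// npv discounts a stream of annual net cash flows (cashFlows[0] occurs one
// year from now) at the given rate.
func npv(rate float64, cashFlows []float64) float64 {
	total := 0.0
	for t, cf := range cashFlows {
		total += cf / math.Pow(1+rate, float64(t+1))
	}
	return total
}

func main() {
	rate := 0.10 // hypothetical 10% discount rate

	// Hypothetical small field: modest CAPEX, short cycle, front-loaded margin ($MM/year).
	small := []float64{-150, 90, 80, 60, 40, 20}
	// Hypothetical large conventional field: similar total CAPEX spread over a
	// longer pre-production period, with later, flatter cash flows ($MM/year).
	large := []float64{-80, -70, 20, 40, 50, 50, 40, 30, 20, 10}

	fmt.Printf("small-field NPV: %.1f $MM\n", npv(rate, small))
	fmt.Printf("large-field NPV: %.1f $MM\n", npv(rate, large))
}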
Introduction
Southeast Asia has attracted sizeable foreign interest in recent years after weathering the global financial crisis while establishing strong manufacturing bases, expanding domestic infrastructure, and stabilizing financial markets. These developments fueled economies and continued to push energy demand higher.
However, Southeast Asia is facing its own challenges. Oil and gas companies in the region have been tasked with meeting domestic demand through local petroleum resources since the 1990s. Although companies have continued to discover new fields, efforts were insufficient to meet the growing demand, causing Southeast Asia to become a net importer in the early 2000s.
Moreover, the industry has accepted a new oil price norm approximately 60% lower than June 2014 levels. As some early signs of recovery emerge, another disruption is on the horizon: because of a combination of technological and socioeconomic factors, demand for oil is expected to peak as early as 2030. Concurrently, on the supply side, assets such as shale and nonhydrocarbon sources (e.g., solar and wind) are eating increasingly into oil’s share of the energy market. These factors are forcing exploration and production (E&P) companies to cut capital and operational spending significantly and refocus their portfolios on core areas of advantage. Investors are now wary of locking up capital in long-cycle megaprojects.
Given the smaller size of discoveries, the need for decreased spending, and the demand for shorter-cycle projects, significant market opportunities exist for any player who can make a small-field business model work.
Potential of Small Fields
The future of Southeast Asia is in small fields, defined here as fields with less than 100 million BOE of reserves. First, the region is scattered with such discoveries and prospects. Second, and more importantly, a small-field approach addresses the current investor preference for high-margin, short-cycle projects. While technically difficult, small fields have great potential in terms of unlocking barrels to fill domestic demand and producing higher returns for capital-constrained companies.
Extent of Small Fields in Southeast Asia. The average size of discoveries in Southeast Asia has decreased to approximately 35 million BOE in the last 5 years, compared with two to three times that in the 1970s–1990s.
|
/* Communities structs have `->str` which is used
* for vty outputs and extended BGP community lists
* with regexp.
* This is a helper to convert already aliased version
* of communities into numerical-only format.
*/
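/* Illustrative example (hypothetical alias mapping): if "peers-a" and
 * "peers-b" have been defined as aliases for "65001:100" and "65001:200",
 * then bgp_alias2community_str("peers-a peers-b") returns the string
 * "65001:100 65001:200".
 */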
char *bgp_alias2community_str(const char *str)
{
char **aliases;
char *comstr;
int num, i;
frrstr_split(str, " ", &aliases, &num);
const char *communities[num];
for (i = 0; i < num; i++)
communities[i] = bgp_alias2community(aliases[i]);
comstr = frrstr_join(communities, num, " ");
for (i = 0; i < num; i++)
XFREE(MTYPE_TMP, aliases[i]);
XFREE(MTYPE_TMP, aliases);
return comstr;
}
|
/* eslint-disable @typescript-eslint/no-unused-vars */
/*
* Copyright (c) 2018 THL A29 Limited, a Tencent company. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import { AbstractClient } from "../../../common/abstract_client"
import { ClientConfig } from "../../../common/interface"
import {
ActivityJoinDetail,
CreateCorpTagResponse,
QueryChatArchivingListResponse,
LicenseInfo,
QueryExternalContactDetailRequest,
QueryExternalUserMappingInfoResponse,
TagGroup,
ChannelCodeInnerDetail,
ExternalContact,
TagInfo,
TagDetailInfo,
VehicleInfo,
ChatArchivingMsgTypeVideo,
CreateLeadResponse,
QueryVehicleInfoListResponse,
QueryVehicleInfoListRequest,
QueryExternalContactListRequest,
MiniAppCodeInfo,
QueryClueInfoListResponse,
CreateChannelCodeRequest,
QueryExternalContactDetailResponse,
FollowUser,
QueryExternalContactListResponse,
ExternalContactSimpleInfo,
QueryActivityLiveCodeListResponse,
CreateCorpTagRequest,
ExternalUserMappingInfo,
QueryLicenseInfoResponse,
QueryClueInfoListRequest,
ActivityDetail,
LiveCodeDetail,
ExternalContactTag,
ChatArchivingDetail,
QueryActivityListResponse,
QueryActivityJoinListRequest,
DealerInfo,
QueryExternalUserMappingInfoRequest,
QueryChannelCodeListRequest,
QueryDealerInfoListResponse,
WeComTagDetail,
QueryActivityLiveCodeListRequest,
QueryActivityJoinListResponse,
ClueInfoDetail,
QueryDealerInfoListRequest,
QueryMiniAppCodeListResponse,
CreateChannelCodeResponse,
QueryLicenseInfoRequest,
QueryActivityListRequest,
QueryMiniAppCodeListRequest,
QueryChatArchivingListRequest,
CreateLeadRequest,
QueryChannelCodeListResponse,
} from "./wav_models"
/**
* wav client
* @class
*/
export class Client extends AbstractClient {
constructor(clientConfig: ClientConfig) {
super("wav.tencentcloudapi.com", "2021-01-29", clientConfig)
}
/**
 * Pulls the list of channel live codes by cursor.
*/
async QueryChannelCodeList(
req: QueryChannelCodeListRequest,
cb?: (error: string, rep: QueryChannelCodeListResponse) => void
): Promise<QueryChannelCodeListResponse> {
return this.request("QueryChannelCodeList", req, cb)
}
/**
 * Pulls the list of activity live codes by cursor.
*/
async QueryActivityLiveCodeList(
req: QueryActivityLiveCodeListRequest,
cb?: (error: string, rep: QueryActivityLiveCodeListResponse) => void
): Promise<QueryActivityLiveCodeListResponse> {
return this.request("QueryActivityLiveCodeList", req, cb)
}
/**
 * Enterprises can use this API to pull the details of an external contact by the external contact's userid.
*/
async QueryExternalContactDetail(
req: QueryExternalContactDetailRequest,
cb?: (error: string, rep: QueryExternalContactDetailResponse) => void
): Promise<QueryExternalContactDetailResponse> {
return this.request("QueryExternalContactDetail", req, cb)
}
/**
 * Pulls the list of activities by cursor.
*/
async QueryActivityList(
req: QueryActivityListRequest,
cb?: (error: string, rep: QueryActivityListResponse) => void
): Promise<QueryActivityListResponse> {
return this.request("QueryActivityList", req, cb)
}
/**
 * Enterprises can use this API to obtain the dealer information recorded on the WeCom SaaS platform.
*/
async QueryDealerInfoList(
req: QueryDealerInfoListRequest,
cb?: (error: string, rep: QueryDealerInfoListResponse) => void
): Promise<QueryDealerInfoListResponse> {
return this.request("QueryDealerInfoList", req, cb)
}
/**
 * Enterprises can use this API to obtain the vehicle series and model information on the WeCom SaaS platform.
*/
async QueryVehicleInfoList(
req: QueryVehicleInfoListRequest,
cb?: (error: string, rep: QueryVehicleInfoListResponse) => void
): Promise<QueryVehicleInfoListResponse> {
return this.request("QueryVehicleInfoList", req, cb)
}
/**
 * Lead collection API.
*/
async CreateLead(
req: CreateLeadRequest,
cb?: (error: string, rep: CreateLeadResponse) => void
): Promise<CreateLeadResponse> {
return this.request("CreateLead", req, cb)
}
/**
 * Enterprises can use this API to obtain the list of leads.
*/
async QueryClueInfoList(
req: QueryClueInfoListRequest,
cb?: (error: string, rep: QueryClueInfoListResponse) => void
): Promise<QueryClueInfoListResponse> {
return this.request("QueryClueInfoList", req, cb)
}
/**
 * Pulls the list of chat archiving records by cursor.
*/
async QueryChatArchivingList(
req: QueryChatArchivingListRequest,
cb?: (error: string, rep: QueryChatArchivingListResponse) => void
): Promise<QueryChatArchivingListResponse> {
return this.request("QueryChatArchivingList", req, cb)
}
/**
 * Enterprises can use this API to obtain, based on external contacts, the list of customers added by the specified members. A customer is an external contact added by a member with the customer contact feature configured; external contacts added by members without the customer contact feature configured are not returned as customers.
*/
async QueryExternalContactList(
req: QueryExternalContactListRequest,
cb?: (error: string, rep: QueryExternalContactListResponse) => void
): Promise<QueryExternalContactListResponse> {
return this.request("QueryExternalContactList", req, cb)
}
/**
 * This API is used to set up the tag library; each enterprise can configure at most 3000 corporate tags.
*/
async CreateCorpTag(
req: CreateCorpTagRequest,
cb?: (error: string, rep: CreateCorpTagResponse) => void
): Promise<CreateCorpTagResponse> {
return this.request("CreateCorpTag", req, cb)
}
/**
 * API for creating a channel live code.
*/
async CreateChannelCode(
req: CreateChannelCodeRequest,
cb?: (error: string, rep: CreateChannelCodeResponse) => void
): Promise<CreateChannelCodeResponse> {
return this.request("CreateChannelCode", req, cb)
}
/**
 * Enterprises can use this API to convert an external contact id under the enterprise entity into the external contact under the 乐销车 application entity.
*/
async QueryExternalUserMappingInfo(
req: QueryExternalUserMappingInfoRequest,
cb?: (error: string, rep: QueryExternalUserMappingInfoResponse) => void
): Promise<QueryExternalUserMappingInfoResponse> {
return this.request("QueryExternalUserMappingInfo", req, cb)
}
/**
 * Pulls the list of activity participation records by cursor.
*/
async QueryActivityJoinList(
req: QueryActivityJoinListRequest,
cb?: (error: string, rep: QueryActivityJoinListResponse) => void
): Promise<QueryActivityJoinListResponse> {
return this.request("QueryActivityJoinList", req, cb)
}
/**
 * This API obtains the detailed information corresponding to a license.
*/
async QueryLicenseInfo(
req: QueryLicenseInfoRequest,
cb?: (error: string, rep: QueryLicenseInfoResponse) => void
): Promise<QueryLicenseInfoResponse> {
return this.request("QueryLicenseInfo", req, cb)
}
/**
 * API for querying the list of Mini Program codes.
*/
async QueryMiniAppCodeList(
req: QueryMiniAppCodeListRequest,
cb?: (error: string, rep: QueryMiniAppCodeListResponse) => void
): Promise<QueryMiniAppCodeListResponse> {
return this.request("QueryMiniAppCodeList", req, cb)
}
}
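// Example usage (a hypothetical sketch, not part of the generated client):
// the ClientConfig shape below follows common tencentcloud-sdk-nodejs
// conventions (a credential object plus an optional region) and should be
// checked against the actual interface definition before use.
//
//   const client = new Client({
//     credential: { secretId: "YOUR_SECRET_ID", secretKey: "YOUR_SECRET_KEY" },
//     region: "ap-guangzhou",
//   })
//   const rsp = await client.QueryActivityList({ /* QueryActivityListRequest fields */ })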
|