# tests/test_kmeans_clustering.py
from unittest import mock
from pathlib import Path
import os

import numpy as np
import pytest
from geojson import FeatureCollection, Feature
import rasterio as rio
from fake_geo_images.fakegeoimages import FakeGeoImage

from blockutils.common import ensure_data_directories_exist, TestDirectoryContext
from blockutils.exceptions import UP42Error

from context import KMeansClustering, raise_if_too_large


@pytest.fixture(scope="session", autouse=True)
def fixture():
    ensure_data_directories_exist()


def test_kmeans_clustering():
    lcc = KMeansClustering(n_clusters=5, n_iterations=5, n_sieve_pixels=1)
    input_ar = np.random.uniform(0, 255, 30000).reshape(100, 100, 3)
    clusters_ar = lcc.run_kmeans(input_ar)
    assert len(clusters_ar.flatten()) == 10000
    assert len(np.unique(clusters_ar)) == 5
    assert np.min(clusters_ar) == 0
    assert np.max(clusters_ar) == 4

    lcc = KMeansClustering(n_clusters=3, n_iterations=8, n_sieve_pixels=16)
    input_ar = np.random.uniform(0, 10, 400000).reshape(200, 200, 10)
    clusters_ar = lcc.run_kmeans(input_ar)
    assert len(clusters_ar.flatten()) == 40000
    assert len(np.unique(clusters_ar)) == 3
    assert np.min(clusters_ar) == 0
    assert np.max(clusters_ar) == 2


def test_process():
    lcc = KMeansClustering(n_clusters=5, n_iterations=5, n_sieve_pixels=1)
    with TestDirectoryContext(Path("/tmp")) as temp:
        image_path, _ = FakeGeoImage(
            100, 100, 4, "uint16", out_dir=temp / "input", nodata=-1
        ).create(seed=100)
        input_fc = FeatureCollection(
            [
                Feature(
                    geometry={
                        "type": "Polygon",
                        "coordinates": [
                            [
                                [-8.89411926269531, 38.61687046392973],
                                [-8.8604736328125, 38.61687046392973],
                                [-8.8604736328125, 38.63939998171362],
                                [-8.89411926269531, 38.63939998171362],
                                [-8.89411926269531, 38.61687046392973],
                            ]
                        ],
                    },
                    properties={"up42.data_path": str(image_path.name)},
                )
            ]
        )
        output_fc = lcc.process(input_fc)
        assert output_fc.features


def test_process_float_with_nodata():
    lcc = KMeansClustering(n_clusters=5, n_iterations=5, n_sieve_pixels=1)
    with TestDirectoryContext(Path("/tmp")) as temp:
        image_path, _ = FakeGeoImage(
            100, 100, 4, "float", out_dir=temp / "input", nodata=-9999.0, nodata_fill=5
        ).create(seed=100)
        input_fc = FeatureCollection(
            [
                Feature(
                    geometry={
                        "type": "Polygon",
                        "coordinates": [
                            [
                                [-8.89411926269531, 38.61687046392973],
                                [-8.8604736328125, 38.61687046392973],
                                [-8.8604736328125, 38.63939998171362],
                                [-8.89411926269531, 38.63939998171362],
                                [-8.89411926269531, 38.61687046392973],
                            ]
                        ],
                    },
                    properties={"up42.data_path": str(image_path.name)},
                )
            ]
        )
        output_fc = lcc.process(input_fc)
        assert output_fc.features
        with rio.open(
            os.path.join(
                "/tmp/output", output_fc.features[0]["properties"]["up42.data_path"]
            )
        ) as src:
            assert src.meta["nodata"] == 255
            band = src.read(1)
            assert np.all(band[:5, :5] == 255)


def test_process_nodata_path():
    lcc = KMeansClustering(n_clusters=5, n_iterations=5, n_sieve_pixels=1)
    with TestDirectoryContext(Path("/tmp")) as temp:
        image_path, _ = FakeGeoImage(
            100, 100, 4, "float", out_dir=temp / "input", nodata=-9999.0, nodata_fill=5
        ).create(seed=100)
        input_fc = FeatureCollection(
            [
                Feature(
                    geometry={
                        "type": "Polygon",
                        "coordinates": [
                            [
                                [-8.89411926269531, 38.61687046392973],
                                [-8.8604736328125, 38.61687046392973],
                                [-8.8604736328125, 38.63939998171362],
                                [-8.89411926269531, 38.63939998171362],
                                [-8.89411926269531, 38.61687046392973],
                            ]
                        ],
                    },
                    properties={"up42.abc": str(image_path.name)},
                )
            ]
        )
        with pytest.raises(UP42Error, match=r".*[NO_INPUT_ERROR].*"):
            lcc.process(input_fc)


def test_raise_if_too_large():
    with mock.patch("rasterio.DatasetReader") as src:
        instance = src.return_value
        instance.meta["dtype"] = "uint8"
        instance.count = 4
        instance.shape = (10, 10)
        raise_if_too_large(instance)

        with pytest.raises(UP42Error, match=r".*[WRONG_INPUT_ERROR].*"):
            instance.meta = {"dtype": "float32"}
            instance.count = 4
            instance.shape = (500000, 500000)
            raise_if_too_large(instance)

        with pytest.raises(UP42Error, match=r".*[WRONG_INPUT_ERROR].*"):
            instance.meta = {"dtype": "uint8"}
            instance.count = 4
            instance.shape = (500000, 500000)
            raise_if_too_large(instance)

        with pytest.raises(UP42Error, match=r".*[WRONG_INPUT_ERROR].*"):
            instance.meta = {"dtype": "uint16"}
            instance.count = 4
            instance.shape = (500000, 500000)
            raise_if_too_large(instance)

        with pytest.raises(UP42Error, match=r".*[WRONG_INPUT_ERROR].*"):
            instance.meta = {"dtype": "uint8"}
            instance.count = 4
            instance.shape = (10, 10)
            raise_if_too_large(instance, 1)

        with pytest.raises(UP42Error, match=r".*[WRONG_INPUT_ERROR].*"):
            instance.meta = {"dtype": "float32"}
            instance.count = 1
            instance.shape = (28873, 22291)
            raise_if_too_large(instance)
// Close stops the worker. Must be called once.
func (r *reacher) Close() error {
	close(r.quit)
	r.wg.Wait()
	return nil
}
Cosmological SUSY Breaking and the Pyramid Schemes
I review the ideas of holographic space-time (HST), Cosmological SUSY breaking (CSB), and the Pyramid Schemes, which are the only known models of Tera-scale physics consistent with CSB, current particle data, and gauge coupling unification. There is considerable uncertainty in the estimate of the masses of supersymmetric partners of the standard model particles, but the model predicts that the gluino is probably out of reach of the LHC, squarks may be in reach, and the NLSP is a right handed slepton, which should be discovered soon.
Introduction
All known consistent string theory models, in asymptotically flat space-time, are exactly supersymmetric. All well established examples of the AdS/CFT correspondence, with radius that can be parametrically large in string units, give flat space limits that are exactly supersymmetric.
The world around us is not supersymmetric. If a theory of quantum gravity is to have relevance to the world, it must explain to us how SUSY is broken, and extant string theory models do not do this. The rest of the papers in this volume treat SUSY breaking in "string theory" by taking a low energy quantum field theory derived as an approximation to a supersymmetric string model, and adding supersymmetry breaking terms to it, which are at best plausibly connected to excitations in string theory. I have criticized this procedure extensively . I believe instead that SUSY breaking is connected to the asymptotic structure of space-time via degrees of freedom that are thrown away in effective field theory. These are not high energy DOF, but rather very low energy excitations, which decouple from particles localized in the bulk because they are localized on the causal horizon. In flat and AdS space, the horizon actually recedes to infinity, and the horizon DOF need not be included in the Hilbert space, but in de Sitter (dS) space, the finite horizon leads to a finite amount of SUSY breaking.
The theory of Holographic Space-time explains the "empirical facts" of supersymmetry in string theory by choosing (in Minkowski space) the variables of quantum gravity to be the cut-off generators of the (generalized) super-BMS algebra. The super-BMS algebras are defined on the lightcone in 4 dimensional Lorentzian momentum space. It has two components, corresponding to the top and bottom of the cone. The $P_0 > 0$ component has generators $\psi^+_\alpha(P,a)$, satisfying $[\psi^+_\alpha(P,a),\,\bar\psi^+_\beta(Q,b)]_+ = Z_{ab}\, M_\mu(P,Q)\,\sigma^\mu_{\alpha\beta}\,\delta(P\cdot Q)$ and $\psi_\alpha(P,a)\,\sigma^\mu_{\alpha\beta} P_\mu = 0$. The delta function is non-zero only when P and Q are collinear, and so both are positive multiples of $(1,\Omega)$, where $\Omega$ is a point on the unit 2-sphere. $M_\mu$ points in the same direction and chooses the minimum of the two positive multipliers. The constraint on $\psi$ says that it lies in the holomorphic spinor bundle over the two sphere, while its conjugate lies in the antiholomorphic bundle. The local super-algebra, at fixed P, completed by the commutators of $Z_{ab}$ with $\psi(P,a)$, and with themselves, has a finite dimensional unitary representation generated by the action of the fermionic generators on a single state. One should think of the index a as labeling the eigen-functions of the Dirac operator on some compact manifold, with a cutoff that fixes the volume of that manifold in Planck units. Other geometric properties of the compact space are encoded in the super-algebra. For the purposes of this note, we can just set the $Z_{ab}$ to be a c-number times $\delta_{ab}$ and let a, b run over the 16 components of a spinor in 7 dimensions. This is the quantum theory of eleven dimensional supergravity compactified on a 7 torus of Planck size. For fixed P, the super-BMS algebra is just the algebra of a single eleven dimensional graviton, and its superpartners, compactified on a Planck sized torus, throwing away states whose mass is larger than the Planck mass. This includes all states charged under the multiple U(1) gauge symmetries of N = 8 SUGRA.
Scattering theory, at least in theories of gravity, is considered to be a map between the past and future representations of the super-BMS algebra. Scattering states are states in which the operator valued measures $\psi_\alpha(P,a)$ vanish outside of the endcaps of a finite number of Sterman-Weinberg cones, but are also non-vanishing at P = 0, except in small annuli surrounding the end-caps. For finite causal diamonds, these singular measures are replaced by a sum over a finite set of spinor spherical harmonics. The P = 0 modes are the degrees of freedom responsible for the entropy of horizons. For finite horizons they contribute finite terms to the Hamiltonian, but they decouple in the infinite horizon limit.
Cosmological SUSY breaking (CSB) is an attempt to implement the consequences of these abstract ideas in low energy effective field theory, and use them to guess at the correct model of Tera-scale physics. It leads to a quite restrictive set of models. The phenomenological analysis of these models is difficult because they must contain a new strongly coupled sector at the TeV scale, but a recent breakthrough has allowed me to make a lot of qualitative predictions for the spectrum of standard model super-partners. The models, which are called Pyramid Schemes , have a mechanism that produces large mixing between gauginos and composite adjoint chiral superfields. As a consequence, they predict heavy gauginos, squarks and right handed sleptons that should be in reach of the LHC, and a very complicated Higgs sector, whose properties are hard to extract from the brown muck of the new strong interactions. One can certainly get a 125 GeV Higgs, but it is not clear that its interactions are close enough to the standard model to fit the data. Questions of whether the weak scale is fine tuned in these models are beset by similar strong interaction obscurities.
The Pyramid Schemes also provide a novel solution to the strong CP problem, a novel dark matter candidate, a possible connection between the dark matter density and baryogenesis, and a possible pathway to explaining extra dark radiation (if the data indicating dark radiation improve to the point where it needs explanation) . They explain the absence of all dimension 4 and 5 operators which could mediate unobserved baryon and lepton violation, while permitting the dimension 5 seesaw operator, which gives rise to neutrino masses.
Cosmological SUSY Breaking
The zero energy generators of the super-BMS algebra provide a huge set of degrees of freedom, localized on the horizon (the conformal boundary) of Minkowski space, that are not incorporated in quantum field theory. They decouple from the S matrix of particles in Minkowski space. The basic idea of CSB is that the coupling between these states and particles remains finite, in the finite causal diamond of a single geodesic in de Sitter (dS) space. We view the radius of dS space as a tunable parameter and ask how the coupling between particles and the horizon DOF leads to SUSY breaking.
To proceed we will have to understand a bit more about the geometry of dS space. The most important fact about dS space is that even a hypothetical observer, who lives for an infinite amount of time, can only see a finite distance R away. The entire history of the universe takes place inside a sphere whose radius can never be bigger than R. That sphere is actually running away from the observer at the speed of light. What is peculiar about the dS universe is that the expanding sphere describing where the backward light-cone from time T meets the forward light-cone from time −T has a finite radius, even as T goes to infinity. This remains true if the universe is not exactly dS space, but began a finite time in the past, and becomes dS space in the asymptotic future. If observations on the acceleration of the universe are given their simplest interpretation in terms of a positive value for Einstein's cosmological constant (c.c.), then this is what is going on in the universe we live in. The radius of our cosmological horizon is about $10^{61}$ Planck units.
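For orientation, a quick numerical check of that figure, using the observed cosmological constant in geometrical units, $\Lambda \approx 1.1\times 10^{-52}\ \mathrm{m}^{-2}$ (a standard value, supplied here for illustration and not quoted in the text):
\[
R_{dS} = \sqrt{3/\Lambda} \approx 1.6\times 10^{26}\ \mathrm{m},
\qquad
\ell_P \approx 1.6\times 10^{-35}\ \mathrm{m},
\qquad
R_{dS}/\ell_P \approx 10^{61}.
\]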
Now we want to think about implementing the idea that SUSY breaking comes from interactions with the cosmological horizon in effective field theory (EFT), the framework into which all physics below the Planck energy scale has been assumed to fit. In effective field theory, SUSY is a gauge symmetry and can only be broken spontaneously. Gauge symmetries have space-time dependent parameters $\epsilon(x)$. We can do a gauge transformation on any Lagrangian. If the original Lagrangian was not gauge invariant, the result is a new Lagrangian, where $\epsilon(x)$ is a new field. This new Lagrangian is gauge invariant, if we let both the original fields and $\epsilon(x)$ transform under the gauge transformation. (This is straightforward for a gauge group with one parameter. If there are multiple parameters $\epsilon^a(x)$ and the different transformations do not commute with each other, the formalism is more complicated, but the results are the same.) The gauge potential is a field $A^a_\mu(x)$. If the original Lagrangian was not gauge invariant, the semi-classical expansion reveals a massive excitation of the gauge field, and we say that the Schwinger-Anderson-Higgs-Brout-Englert-Guralnik-Hagen-Kibble phenomenon has taken place. In the case of supersymmetry, the gauge parameters are a fermionic spinor field $\epsilon_\alpha(x)$. The massive excitation is that of the gravitino field $\psi_{\mu\alpha}(x)$ and has spin 3/2. Its mass is denoted $m_{3/2}$.
The mass of the gauge excitation is proportional to the gauge coupling. In the limit that the gauge coupling goes to zero, the Higgs phenomenon morphs into the phenomenon of Nambu-Goldstone spontaneous symmetry breaking (whence the somewhat inaccurate name spontaneous breaking of gauge symmetry for the Higgs phenomenon). The longitudinal part of the massive gauge field becomes the Nambu-Goldstone particle associated with symmetry breaking. The mass is the product of the gauge coupling and the value of an order parameter, F, whose size depends on the energy scale at which the symmetry is broken in the limit of zero gauge coupling. In the case of supersymmetry, the gravitino is the symmetry partner of the graviton, and the relevant coupling is $1/m_P$, where $m_P = M_P/\sqrt{8\pi} \sim 2\times 10^{18}$ GeV is the reduced Planck mass. The order parameter F has dimensions of squared mass, and determines the typical difference in squared masses between bosons and their supersymmetric partner fermions, in non-gravitational supermultiplets. The gravitino mass is set by F and $m_P$ (the standard relations are recalled below). Super-Poincare invariance appears naturally in N = 1 SUGRA only in the presence of a gauged discrete complex R symmetry, which sets the constant in the super-potential to zero. Indeed, super-symmetry is compatible with general negative values of the cosmological constant. This has been abundantly confirmed by the AdS/CFT correspondence, which provides the quantum theory of many space-times of the form $AdS \times K$, where K is a compact manifold and Anti-deSitter space is the maximally symmetric space with negative c.c. SUSY is incompatible with positive c.c. The formula for the c.c. in SUGRA involves two order parameters: F, the SUSY breaking order parameter, and W, the order parameter for R symmetry. R symmetry is the subgroup of the discrete symmetry group of the model which acts on the generators of the supersymmetry algebra. (More precisely: R symmetry is the coset of the discrete symmetry group G by the subgroup H which leaves the SUSY generators invariant; H is a normal subgroup of G, so that G/H is a group, called the discrete R symmetry.) CSB depends on the hypothesis that the interactions with the horizon generate R violating terms in the effective action, which in turn lead to spontaneous SUSY breaking. It is also assumed that the gravitino is the lightest particle carrying R charge.
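For reference, the standard N = 1 SUGRA relations that play this role are, schematically (suppressing Kahler potential factors; these are textbook formulas, not reproduced explicitly in the text):
\[
V_0 \;=\; |F|^2 \;-\; \frac{3\,|W|^2}{m_P^2},
\qquad
m_{3/2} \;=\; \frac{|W|}{m_P^2},
\]
so that a vanishing c.c. requires $|F|^2 = 3|W|^2/m_P^2$, i.e. $m_{3/2} = |F|/(\sqrt{3}\,m_P)$. Unbroken SUSY with zero c.c. goes with W = 0, while SUSY breaking with (nearly) zero c.c. requires a nonzero R breaking order parameter W.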
Then the leading order diagrams that could lead to R violating interactions coming from the horizon have a gravitino line going out to the horizon and another coming back, violating R symmetry by two units. These diagrams can be thought of as a term in second order perturbation theory in the interaction via which the horizon emits and absorbs gravitinos: a sum over all states of the horizon with which the gravitino can interact, weighted by energy denominators. The energy denominators are all of order $1/R_{dS}$, because the $e^{\pi (R_{dS} M_P)^2}$ states of the horizon live in a band of this size.
The horizon is a null surface and the gravitino can propagate on it for a proper time of order $1/m_{3/2}$. According to conventional Feynman diagrams, written in the Fock-Schwinger proper time parametrization, it performs a random walk on the surface, with a step size in proper time given by the UV cutoff. If, as we will assume, the theory has extra dimensions large in 10 or 11 dimensional Planck units, then the step size is given by the higher dimensional Planck scale. Following Witten, we will assume that this is the scale of coupling unification, which is $2\times 10^{16}$ GeV. The entropy of the horizon states with which the gravitino interacts is proportional to the area in 4 dimensional Planck units that it covers in proper time $1/m_{3/2}$, and is $c_1 M_U/m_{3/2}$. Thus the full amplitude depends exponentially both on this entropy and on $R_{dS}$. Assuming that $m_{3/2}$ goes to zero as a power of $R_{dS}^{-1}$, we find a contradiction unless the power is precisely $R_{dS}^{-1/2}$. Indeed, if the mass is assumed to go to zero more rapidly than this, the formula for R breaking interactions blows up exponentially as $R_{dS} \to \infty$, while if it goes to zero more slowly the strength of these interactions vanishes exponentially.
Using the reduced Planck mass, $M_P^2 = 8\pi m_P^2$, and the relation $m_P R_{dS}^{-1} = \sqrt{\Lambda/3}$, we get $m_{3/2} = K\,\Lambda^{1/4}$ for a unification scale $M_U = 2\times 10^{16}$ GeV, with K a constant of order one. The SUSY breaking order parameter $F \sim m_{3/2}\, m_P$ is thus in the (few TeV)$^2$ range. This is a remarkably low value for the SUSY breaking scale, a fact which drives much of the analysis below.
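As a rough numerical translation (assuming the standard relation $F \simeq \sqrt{3}\,m_{3/2}\,m_P$ recalled earlier; the precise coefficient, which involves $M_U$, is not needed for the order of magnitude):
\[
\Lambda^{1/4} \approx 2\times 10^{-3}\ \mathrm{eV}
\;\Longrightarrow\;
m_{3/2} \sim 10^{-3}\text{--}10^{-2}\ \mathrm{eV}
\;\Longrightarrow\;
\sqrt{F} \sim \sqrt{\sqrt{3}\,m_{3/2}\,m_P} \sim 2\text{--}6\ \mathrm{TeV},
\]
which is why the strong coupling scale $\Lambda_3$ introduced below must also sit near a TeV.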
The structure of diagrams contributing to the R violating terms in the Lagrangian implies that these terms do not satisfy the usual constraints of technical naturalness, familiar from QFT and perturbative string theory. Any diagram with more than two gravitinos, or with heavier R charged particle states, mediating between the local vertex and the horizon, will be exponentially suppressed. Diagrams involving R neutral exchanges with the horizon give contributions which have a finite limit as R dS → ∞, and are small corrections to terms already incorporated in the Λ = 0 effective Lagrangian.
As a consequence, apart from the R symmetry itself, symmetries, or approximate symmetries, of the Λ = 0 model are also preserved by the R-violating terms. We exploit this in the following way: we choose the R symmetry to forbid all terms of dimension 4 or 5 in the Λ = 0 model which violate B and L, apart from the dimension 5 superpotential which generates neutrino masses of roughly the right order of magnitude. Insertions of higher dimension B and L violating operators into a diagram with a pair of gravitino lines going out to the horizon cannot generate the lower dimension operators, because the extra gravitino loop is cut off at the SUSY breaking scale or below, by its space-time structure. We will see later that, with one extra mild assumption, this mechanism for R violation also provides a novel solution to the strong CP problem.
The Pyramid Schemes
We now want to build an effective field theory, compatible with current experiment, and with the mechanism of CSB. It must contain the MSSM, as well as an unconstrained goldstino superfield X, and must preserve SUSY and a discrete R symmetry, but spontaneously violate SUSY when R breaking terms are added. It must be consistent with the bounds on super-partner masses.
If the only dynamically generated scale in the theory is the QCD scale, it is impossible to do this. The most general renormalizable Lagrangian is that of the Minimal Supersymmetric Standard Model (MSSM), with an additional superpotential for X built from a cubic polynomial C. This always has supersymmetric solutions. (A meta-stable SUSY violating solution can be acceptable only if the probability for transitions into a Big Crunch, by tunneling into the basin of attraction of the SUSic minimum, is the inverse recurrence time for dS space. There are not enough parameters in the model to engineer this.) Even if we could generate a non-zero $F_X$, the gluino mass generated by this model would be too small to be compatible with experimental bounds. Non-renormalizable corrections to this Lagrangian would be suppressed by powers of $M_U$ or $m_P$ and cannot help with these problems.
To remedy the gluino mass problem, we must include a strongly coupled hidden sector, some of whose fields carry color, in order to generate a coupling between X and the QCD field strength $W^a_\alpha$, which can give a large enough gluino mass. This is the only way to generate a new low energy scale in a natural manner. The scale $\Lambda_3$ of this new strongly coupled sector has to be close to the SUSY breaking scale, since $F_X/\Lambda_3$ will be the natural scale that enters into the formula for the gluino mass, and $F_X$ is so low. The Pyramid Scheme models we propose have a natural explanation for this coincidence of scales.
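A sketch of the kind of operator one has in mind here (the explicit form is an illustration, not taken from the text): integrating out the strongly coupled colored states can generate
\[
\delta L \;\sim\; \int d^2\theta\; \frac{X}{\Lambda_3}\, W^a_\alpha W^{a\,\alpha} \;+\; \mathrm{h.c.}
\;\;\Longrightarrow\;\;
m_{\tilde g} \;\sim\; \frac{F_X}{\Lambda_3},
\]
so that with $F_X$ in the (few TeV)$^2$ range, $\Lambda_3$ cannot sit far above a TeV if the gluino is to be heavy enough.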
The necessity for new colored particles is potentially problematic, if we wish to preserve coupling unification. The obvious solution to this is to include complete multiplets of some unified gauge group, but one must also be sure that the gauge couplings have no Landau poles below the unification scale. (Some authors like to preserve coupling unification to two loop accuracy. Two loop corrections are of the same size as one loop threshold corrections at the unification scale, so I have never been very impressed by the "better fit" given by the two loop results. I will try only to preserve the one loop results and the fact that the two loop corrections are small.) This puts restrictions on the size of the new strong gauge group.
Seiberg's general analysis of the IR behavior of asymptotically free SUSY gauge theories enables us to rule out many possibilities. Initially, I was led to the N F = N C = 5 theory as the unique possibility that could preserve SU(5) unification, but a careful analysis of two loop effects showed that the model had Landau poles below the unification scale. I cannot claim to have made an exhaustive survey, but at the moment the only class of models that survives all of these simple tests are the Pyramid Schemes .
The Pyramid Schemes utilize Glashow's Trinification, an $SU(3)^3$ gauge theory in which a discrete $Z_3$ symmetry permutes the three SU(3) groups and ensures equality of couplings in the symmetry limit. I will have to assume the reader is familiar with this, and refer to the ith subgroup as $SU_i(3)$. Color is embedded in $SU_3(3)$ and the weak SU(2) in $SU_2(3)$. 10 chiral fields of each generation are assumed to obtain mass at the unification scale. The Higgs fields $H_{u,d}$ also belong to an incomplete multiplet, but we do not specify what it is. More generally, we make no attempt to explain details of physics at the unification scale.
The quiver diagram of Trinification is a chiral triangle. The simplest extension of it answering our needs extends the triangle to a Pyramid with triangular base (a tetrahedron). The fields connecting the apex of the Pyramid to the base transform in the vector like representation $T_i \oplus \tilde T_i \in (F, \bar 3_i) \oplus (\bar F, 3_i)$ and are called trianons. F is some representation of the Pyramid Group $G_P$. Both the group and the representation must be fairly small, to preserve standard model coupling unification.
While I don't pretend to have made an exhaustive search, the only examples I've found that work are $G_P = SU_P(k)$ with k = 3, 4 and F the fundamental representation. The case k = 3 is more attractive in a number of ways. The minimal R symmetry group that works for k = 3 is $Z_8$, compared to $Z_{14}$ for k = 4. Furthermore, for k = 3 there's a natural explanation for the coincidence between $\Lambda_3$ and the scale of SUSY breaking, and an interesting dark matter candidate. So far, there are no analogous advantages for k = 4.
The Singlet Sector
The R symmetry is chosen to forbid all relevant super-potential terms, which would otherwise be expected to be of order $M_U$ or greater. Another way to say this is that we insist that the Λ = 0 theory be technically natural. The super-potential is thus a sum of cubic trianon invariants plus the familiar standard model super-potential, with no µ term, and no terms violating B or L (a sketch of its form is given below). The expression $T_i^3$ refers to the cubic invariant of the $(3_P, \bar 3_i)$ representation. The unified group would set all these couplings equal, but unification scale symmetry breaking could easily change that, without ruining the success of one loop gauge coupling unification.
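A minimal sketch of the form this super-potential takes, assuming only the couplings named later in the text ($g_i$ and $\tilde g_i$ multiplying the cubic trianon invariants):
\[
W \;=\; \sum_{i=1}^{3}\left( g_i\, T_i^{\,3} \;+\; \tilde g_i\, \tilde T_i^{\,3} \right) \;+\; W_{\rm std}.
\]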
The R breaking diagrams with gravitinos propagating out to the horizon can induce R violating terms, in particular mass terms for the trianons. The model has a SUSY preserving minimum both with and without these extra terms, and so does not satisfy the requirements of CSB. This indicates the necessity of introducing other low energy fields.
The simplest way to do this, and perhaps the only one which doesn't disturb the running of the standard model couplings, is to add singlets under the full gauge group. It seems that the minimal number is 3 fields $S_i$. There is no reason to assume that the index here transforms under the $Z_3$ of the trinification group, though it is suggestive of interesting structure at the unification scale. The R symmetry action on the $S_i$ can be chosen so that the trilinear couplings are allowed. However, we will also impose an additional discrete symmetry, which does not act on the supersymmetry generators, and ensures that the matrix $C_{ij} = C_{ijk} S_k$ has a zero eigenvalue for any choice of $S_k$. The full Λ = 0 super-potential, $W_S + W_3 + W_{std}$, has a SUSic, R symmetric minimum when all fields vanish. Although the cubic super-potential has flat directions, these are all lifted by non-renormalizable R symmetric corrections to the Kahler and super-potentials, scaled by the unification or Planck scales. Finally, with the given field content, the gauge couplings remain small at low energy, so that the model preserves both SUSY and R symmetry. Thus, this low energy model is consistent with being the low energy limit of the Λ → 0 limit of a model of stable dS space. We now add the R violating terms $W_{\not R}$ to the superpotential. The equations for a supersymmetric point then involve $(T)^2_j$, the bilinear obtained by differentiating the trilinear invariant with respect to $T_j$. As noted, we can choose the R symmetry, plus another discrete symmetry which does not act on the supersymmetry generators, to ensure that the matrix is not invertible for any $S_i$. We further assume that the coefficients in $W_{\not R}$ are chosen so that $\mu_{ij}$ shares the zero modes of $C_{ij}$ and that the $S_i$ independent terms in $\partial W/\partial S_i$ have components in the zero mode subspace. In this case, there can be no SUSic minimum. The constraints on $W_{\not R}$ do not follow from symmetries, but these terms arise from a very special class of diagrams. It's only by imposing these constraints that we obtain a low energy model compatible with an underlying gravitational model that breaks SUSY.
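The logic of the last few steps can be made explicit in schematic form (the symbols $\Phi_j$ and $f_i$ below are illustrative placeholders for the field bilinears and the $S$-independent pieces, and are not taken from the text; the matrices are taken symmetric for simplicity):
\[
\frac{\partial W}{\partial S_i} \;=\; \big(C_{ij}(S) + \mu_{ij}\big)\,\Phi_j \;+\; f_i,
\qquad C_{ij}(S) = C_{ijk} S_k .
\]
If $v_i$ is the common zero mode, $v_i\,(C_{ij}(S)+\mu_{ij}) = 0$ for every value of S, while $v_i f_i \neq 0$; then $v_i\,\partial W/\partial S_i = v_i f_i \neq 0$, so the F-term conditions cannot all be satisfied and there is no SUSic minimum.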
To understand fully the dynamics of SUSY breaking in this model, we must first make sure that the $SU_P(3)$ gauge theory indeed becomes strongly coupled, and find the relation between its dynamical scale $\Lambda_3$ and the CSB SUSY breaking scale. The $SU_P(3)$ Lagrangian at high energies is SUSY QCD with $N_F = 3 N_C$. Its one loop beta function vanishes, but in the absence of other couplings the two loop beta function is IR free. However, if the couplings $g_i$ and $\tilde g_i$ are all equal to 4/3 times the gauge coupling, then we have a line of fixed points. This line is attractive. We imagine that, at the unification scale $M_U$, the effective theory lies in the domain of attraction of this line and is rapidly sent to a point where the coupling is relatively strong, but barely in the perturbative regime. The couplings then remain fixed until we reach the highest mass threshold of the trianon fields. That mass scale is set by the parameters $m_i$, which come from interactions with the horizon. These three parameters are of comparable order of magnitude and all vanish like $\Lambda^{1/8} m_P^{1/2}$ when the c.c. is sent to zero. We do not know how to calculate them more accurately than that. For phenomenological reasons, we will assume that the lightest mass is $m_3$, the mass of the colored trianon.
Below the first two trianon thresholds, the Lagrangian has $N_F = N_C$ and is asymptotically free. We have assumed that the fixed line value of the gauge coupling is fairly large, so the confinement scale $\Lambda_3$ is slightly below the masses $m_{1,2}$ of the two colorless trianons. We can think of the relation between these scales as roughly analogous to that between the charmed quark mass and the QCD scale, $m_i \approx 4\pi \Lambda_3$. We will also assume that $m_3$ is of order $\Lambda_3$, somewhat analogous to the strange quark mass in QCD. This means that we can analyze the low energy dynamics in terms of chiral perturbation theory, which in this case means Seiberg's effective Lagrangian.
The colored trianons are confined into a three by three matrix M of pyrmesons, which transform as an octet and a singlet of color, and singlet pyrma-baryon, B, and anti-pyrma-baryon, $\tilde B$, fields. The effective super-potential on the moduli space contains a constant $W_0$, added in order to tune the c.c. to its observed value. It does not affect the low energy dynamics, which is independent of the Planck mass to first approximation, once we fix the relevant couplings $m_i$, $\mu$, and $F_i$. In writing this effective super-potential we've rescaled all fields and parameters by powers of $\Lambda_3$, to make them dimensionless. Apart from this rescaling, C(S) is the cubic polynomial in the singlets that appeared in the super-potential above the scale $\Lambda_3$. L is a Lagrange multiplier field. Before analyzing the predictions of this model, we note that something very similar results if we set $\kappa_i = \tilde\kappa_i = 0$ for i = 1 or i = 2. The UV model no longer has a fixed line, but the couplings vary slowly. In particular, although the gauge coupling is now IR free, we can still have a strong coupling scale $\Lambda_3$ without producing a Landau pole below $M_U$, as long as $\Lambda_3 < 2$ TeV. This is interesting because a model that preserves one of the pyrma-baryon symmetries at the renormalizable level allows the lightest particle carrying this quantum number to be a dark matter candidate, if an appropriate asymmetry is generated in the very early universe. We will discuss this further below.
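For readers unfamiliar with the $N_F = N_C$ case, the role of the Lagrange multiplier is to impose Seiberg's quantum-modified constraint; in the dimensionless ($\Lambda_3 = 1$) units used above it contributes a term of the schematic form
\[
W_{\rm eff} \;\supset\; L\left(\det M \;-\; B\tilde B \;-\; 1\right),
\]
to which the mass terms, the singlet couplings and the constant $W_0$ described in the text are added.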
Crude Estimates of MSSM Super-partner Masses
The first order of business is to estimate the masses of supersymmetric partners of standard model particles in this model. Here I've recently been surprised. I'd initially thought that the Pyramid scheme was a form of direct gauge mediation. In fact, for the gluino at least, the gauge mediated masses are much smaller than the contribution from mixing between the gaugino and the pyrmeson octet. Write the pyrmeson matrix M in terms of dimensionless fields $N^a$ multiplying the Gell-Mann matrices $\lambda^a$; these fields are the pyrmesons. Consider an operator that mixes the gluino field strength with the octet pyrmeson superfield $N^a$, with a coefficient function f. The function f and its derivatives have a factor of the QCD coupling $g_3$, but no loop factors. They are somewhat analogous to hadron magnetic moments in QCD, with the insertion of one weakly coupled field into an effective Lagrangian for composites. The QCD fine structure constant $\sim 0.1$ gives $g_3 \sim 1.4$, so this is nominally of the same size as contributions to the Majorana mass (these formulae must be rescaled by the Kahler potential to get physical masses), because $F_M$, $\Lambda_3$ and $m_3$ are all in the TeV range. The result is a pair of octet Majorana fermions, whose masses and mass splitting are all of order $|F_M|^2/\Lambda_3^3$. Given the rules of CSB, the numerator is bounded by about $10^{14}\ \mathrm{GeV}^4$. RG running gives an upper bound $\Lambda_3 < 2\times 10^3$ GeV. The lower bound is harder to determine, but is related to the fact that we haven't seen any of the pyrma-hadrons, and so is probably about 1 TeV. Thus, the lightest mass eigenstate with the quantum numbers of the gluino is between 10 and 100 TeV. It is a mixture, with order one mixing angle, of the gluino interaction eigenstate and the composite octet fermion in the $N^a$ super-multiplet. The conventional gauge mediated Majorana gluino mass is suppressed by a factor $1/16\pi^2$ relative to these masses. The dominant contribution to squark squared masses comes from a one QCD loop, convergent, diagram, as in super-soft models. The squark masses are universal and are of order 900–9000 GeV. The lower reaches of these estimates mean that squarks, but not gluinos, will be within the reach of the LHC, while the upper values bode ill for near term experimental detection of these particles.
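Plugging in the numbers quoted above makes the 10–100 TeV range explicit:
\[
m \;\sim\; \frac{|F_M|^2}{\Lambda_3^3}
\;\approx\;
\frac{10^{14}\ \mathrm{GeV}^4}{(2\times 10^{3}\ \mathrm{GeV})^3} \approx 1.3\times 10^{4}\ \mathrm{GeV}
\quad\text{to}\quad
\frac{10^{14}\ \mathrm{GeV}^4}{(10^{3}\ \mathrm{GeV})^3} = 10^{5}\ \mathrm{GeV},
\]
for $\Lambda_3$ between 2 TeV and 1 TeV, with the squarks roughly an order of magnitude lighter.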
A similar formula is also appropriate for the other gauginos, though here the argument is more complicated. Basically, using arguments analogous to those invoked when discussing mixing between the photon and strongly interacting vector mesons, one counts factors of $g_i$ and $4\pi$, with everything else determined by dimensional analysis and the scale $\Lambda_3$. In this kind of bilinear mixing, there are no loop factors. One calculates the two point function of the gauge field strength $W^i_\alpha$ and the derivative of the trianon composite $D_\alpha M^i$ (here i is the adjoint index of the i-th gauge group in the standard model). Then one argues that when $g_i = 0$ there is a stable adjoint fermion, with a Majorana mass of order $2m_2$ for the SU(2) triplet, and the lighter of $2m_1$ and $2m_2$ for the U(1) adjoint (since both of the colorless trianons have U(1) couplings). Mixing between the gauginos and these states is a seesaw mechanism, giving Majorana gaugino masses of order the square of the gaugino-adjoint mixing divided by the heavy adjoint mass. It's not completely clear which F terms will give the dominant contribution here. The masses are probably less than a TeV, though there's considerable uncertainty in these estimates. Slepton masses will be down from this by the square root of a loop factor, $\sqrt{\alpha_i/4\pi}$.
One of the right handed sleptons is thus the NLSP. If the bino weighs a TeV, this crude estimate gives right handed slepton masses of order 30 GeV, which is already ruled out. Indeed, for the decay topology of slepton going to Goldstino, sleptons are ruled out up to about 260 GeV, and the next run of the LHC might explore another 100 GeV in mass. If we took our estimates seriously, this would push the bounds on bino and chargino masses up to about 9 and 27 TeV. However, the strong $SU_P(3)$ uncertainties do not warrant such drastic conclusions. I cannot emphasize too strongly how much uncertainty there is in these estimates, but they lead us to expect the discovery of right handed sleptons in the near future. There is one caveat to the claim that a RH slepton is the NLSP, since we have not studied the masses of all the states in the singlet sector. However, the diagrams contributing to slepton masses are lepton flavor blind. The coupling of sleptons to the singlet sector is mediated by the Higgs boson, and we know that the Higgs couplings of the leptons are small, ranging from $10^{-5}$ to $10^{-2}$. Decays of a slepton into a lepton and a hypothetical light singlino would occur outside the LHC detectors. Thus, even if it turns out that the NLSP is a singlino, the light right handed sleptons predicted by the model should be observed.
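The 30 GeV figure quoted above follows directly from the loop-factor suppression; for example, taking a hypercharge coupling $\alpha_1 \approx 10^{-2}$ (a standard value, supplied here for illustration) and a 1 TeV bino,
\[
m_{\tilde\ell_R} \;\sim\; \sqrt{\frac{\alpha_1}{4\pi}}\; m_{\tilde B}
\;\approx\; \sqrt{\frac{10^{-2}}{4\pi}}\times 1\ \mathrm{TeV}
\;\approx\; 30\ \mathrm{GeV},
\]
which is why pushing the slepton bound to a few hundred GeV drags the bino and chargino estimates up into the multi-TeV range quoted in the text.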
Dark Matter
In the original paper on the Pyramid Scheme, Fortin and I got caught up in the excitement surrounding positron excesses and other dark matter signatures. The majority opinion seems to be that these excesses are no longer considered to be relevant to dark matter. Since then I've returned to the simple idea that dark matter is one of the pyrma-baryons of the strongly coupled Pyramid sector. This requires that we omit one pair of trilinear couplings from the underlying Lagrangian, and one can choose the R symmetry properties of the model to make this natural. We have seen that the attractive RG structure of the model is preserved when we do this, as long as the scale Λ 3 < 2 TeV.
We've seen above, that we want to keep the trilinear couplings of the colored trianon. This implies that (if the dark matter is the fermion in the supermultiplet) dark matter has a magnetic moment. This is an old idea, which goes back to technicolor and has potential observational consequences .
In order to get the right dark matter density, we need to postulate an asymmetry generated in the very early universe. This is very easy to do, but has no predictive power. However, it opens the door to a connection between the dark matter density and the baryon asymmetry of the universe. The standard model couplings of the trianons lead to a coupling which implies that an asymmetry in one quantum number will give rise to a chemical potential for the other. If this chemical potential is substantial when the interactions that violate the corresponding quantum number go out of equilibrium, then spontaneous (pyrma) baryogenesis will occur , thus connecting the dark matter and baryon densities of the universe. There may also be a possible dark matter candidate in the singlet sector of the model, about which I understand too little to make a definitive statement. Presumably, if it exists, it would be much more like a WIMP. If this is the dark matter, we can restore the possibility of UV equality of all the trianon trilinear couplings, which is somewhat more elegant.
Dark Radiation
The gravitinos in any model implementing CSB are very light and were certainly relativistic at the eras where the CMB and structure formation may indicate the need for more relativistic species. Standard estimates indicate that such light gravitinos decouple before the electroweak phase transition and contribute much less than a neutrino species to the evolution of the universe. However, non-thermal repopulation of the gravitinos by late decaying NLSPs, could generate the required excess. This could only occur if the NLSP was part of the singlet sector, because our bounds on light MSSM super-partners rule out such late decays.
The Strong CP Problem
As has been pointed out previously, the Pyramid Scheme provides a novel solution to the strong CP problem. When Λ = 0 the model has many U(1) symmetries at the renormalizable level, which allow us to rotate away all CP violating phases except the CKM phase. This would lead to an axion with a decay constant that has been ruled out by experiment. However, the R symmetry violating interactions coming from the horizon break all these symmetries and give the axion a large mass. Normally, when we try to do this in QUEFT, the U(1) breaking terms re-introduce CP violating phases.
In the Pyramid scheme, these terms come from a very special class of diagrams, where two gravitinos are exchanged with the horizon. The part of these diagrams localized near the origin has all the symmetries of the Λ = 0 theory, and the CP violating θ QCD induced through the CKM matrix is tiny. The other end of the gravitino lines is more mysterious, but since it lies on the horizon it is at a very high local temperature, of order the unification scale. Thus, if the fundamental origin of CP violation is spontaneous breakdown, at scales ≪ M U there will be no CP violation near the horizon. Thus, the phases in all the R breaking diagrams are small, without either fine tuning or an axion.
The Higgs Potential and the Electroweak Scale
Neglecting loops involving standard model fields, the Higgs potential in the Pyramid Scheme is determined by the low energy Kahler and super-potentials. The Kahler potential depends on the singlet fields through the combinations $\alpha^k_i S_i$, for k = 1, 2. This comes from integrating out the colorless trianons. This part of the Kahler potential was previously calculated in zeroth order perturbation theory in the Pyramid coupling. That approximation is not really justified, because the masses of the trianons are just a few times $\Lambda_3$. The parameters $F_i$ and µ come from interactions with the horizon. We expect them to be of order a few TeV, but do not have a way to calculate them with any precision.
One should also include contributions to the Higgs potential from stop loops, and, given the size of SU(2) × U(1) preserving gaugino masses that we have estimated, loops of TeV scale gauginos. We will also want to choose the couplings β i to be fairly large, which means that loops of singlets will also be important in determining the Higgs potential.
In earlier work we included some, but not all, of these effects, many of which push in opposite directions. We found that we could fit the LHC bounds, but that this required a few percent tuning. Given our new insights into gaugino masses, and the singlet loops, which we simply forgot in that analysis, the problem becomes more complicated. In addition, the large $\beta_i$ present us with the possibility of large mixing between singlets and the lightest Higgs. Neglect of the complicated dependence of the Kahler potential on the $S_i$ was unjustified.
Note that the tuning in the Pyramid scheme is not really the same as the oft discussed little hierarchy problem of the MSSM. It comes from the fact that the Higgs potential above contains a number of relevant parameters whose natural scale in CSB is multiple TeV. The dimensionless parameters are bounded from above in order to avoid Landau poles below the unification scale. On the other hand, we have a rather complicated function of 6 complex variables (the neutral Higgs fields h u,d , the singlet pyrmeson M, and the three S i fields ) to minimize, so it seems premature to conclude that a tuning of one part in a hundred is unnatural. It is, at any rate, too complex to attempt here.
Conclusions
The Pyramid Schemes are the only low energy effective field theories compatible with both the very low scale of SUSY breaking required by CSB, extant experimental data, and standard model gauge coupling unification. They contain a new strong coupling gauge theory, with fields carrying standard model quantum numbers. The most attractive candidate so far has a strongly coupled SU P (3) gauge group.
The strong interactions complicate the computation of the Higgs potential and parts of the spectrum, but terms that give rise to TeV Dirac masses for gluinos (and probably the electroweak gauginos as well) enable us to make a few robust predictions:
• The MSSM spectrum can be characterized as "flipped mini-split SUSY", with squarks and sleptons systematically lighter than gauginos. Gluinos will probably not be detected at the LHC, but squarks should show up in the next run, with production and decay modes characteristic of the gluino decoupling limit. The entire Higgs sector is complicated by mixing with the singlet fields in the low energy model. This spectrum of colored super-partners is predicted by the model. It's realized more generally in any model in which there are adjoint chiral superfields, Dirac masses comparable to the supersymmetric adjoint mass term, and small SUSY breaking Majorana terms for the gaugino. Models with adjoint fields that are elementary up to scales much larger than the SUSY breaking scale will have problems with gauge coupling unification.
• The NLSP is either a right handed slepton, or something from the singlet sector, but in any case the right handed sleptons are "detector stable" and should be seen soon at the LHC, since they decay to leptons and very light gravitino LSPs. The crudest calculations put their masses 9 times lower than the LHC lower bound. The simplest way to solve this problem is to assume that the bino and charged winos are at 9 and 27 TeV, but there is so much uncertainty in these estimates from hidden sector strong interactions that one should not take these drastic values that seriously. Anyone who has followed my work on the Pentagon and Pyramid schemes will know that I'd previously estimated that the bino was the NLSP and that charginos should be found at the LHC. The recent discovery of operators that give Dirac gaugino masses has changed everything in a dramatic way.
Apart from that, the Pyramid Schemes retain the flavor structure of gauge mediation . The only violation of rotation symmetries among the generations comes from Standard Model Yukawa couplings, and the mechanism determining the pattern of those is assumed to operate at very high scale. Dimension 4 and 5 baryon and lepton number violation is eliminated by a combination of the discrete R symmetry of the Λ = 0 model and the special properties of the R breaking operators coming from the horizon. A similar conspiracy solves the strong CP problem. The discrete R symmetry imposes an accidental U P Q (1) Peccei-Quinn symmetry on the renormalizable terms of the Λ = 0 theory, and the special nature of discrete R violation, combined with the assumption that CP is spontaneously broken at a scale below the unification scale, guarantee that the would be axion is lifted to a high mass, without introducing new phases into low energy couplings. The Pyramid Schemes also have interesting implications for cosmology. If we assume one of the pyrma-baryon symmetries is preserved at the renormalizable level, then the dark matter candidate is a standard model singlet fermion, with a mass of 10s of TeV and a commensurate magnetic dipole moment. The correct dark matter density is obtained by assuming an appropriate primordially generated asymmetry, and there is a potential connection between the dark matter density and the ordinary baryon asymmetry via a form of spontaneous baryogenesis .
On the other hand it is possible, though not guaranteed, that there can be a light state in the singlet sector that could serve as dark matter. In this case we would be able to have an elegant and symmetric theory at the unification scale, which would explain the coincidence of scales between Λ 3 and SUSY breaking. The model with only two of the three renormalizable PB violating couplings does the same job, but is less elegant.
If the singlet dark matter candidate were sufficiently light, it could be the NLSP, with its stability due only to R parity. Then it could also be a form of late decaying dark matter, which would produce a dark radiation density in the form of non-thermal gravitinos. It may be that cosmological data will eventually require us to explain such a density of dark radiation. The very light gravitinos of the Pyramid Scheme are hard to detect, but beg to be used as dark radiation. Much more investigation along these lines is needed.
'use strict'

import { Vec2 } from '../../node_modules/natlib/Vec2.js'

import { FAILURE_MOVED_PICTURE } from '../Background.js'
import { Level, LevelState } from '../Level.js'
import { MovingWebsite } from '../MovingWebsite.js'

export class Moving extends Level {
    constructor(startingPoint: Vec2, curtain = 0) {
        super(startingPoint, curtain)

        this.website = new MovingWebsite
        this.curtainPicture = FAILURE_MOVED_PICTURE
    }

    update() {
        super.update()

        if (this.state === LevelState.WAITING || this.state === LevelState.FAILING || this.state === LevelState.WINNING) {
            this.website.update()
        }
    }

    getIndex() {
        return 6
    }
}
# NOTE: imports below are reconstructed from usage and assume a standard Django project layout.
import os
from smtplib import SMTPException

from django.conf import settings
from django.contrib import messages
from django.contrib.auth.forms import PasswordResetForm


def send_new_user_email(request, user):
    new_user_form = PasswordResetForm({"email": user.email})
    try:
        if new_user_form.is_valid():
            new_user_form.save(**{
                "use_https": True,
                "email_template_name": "joplin_UI/email/new_user.txt",
                "html_email_template_name": "joplin_UI/email/new_user.html",
                "subject_template_name": "joplin_UI/email/new_user_subject.txt",
                "domain_override": settings.BASE_URL,
                "extra_email_context": {
                    "styleguide_url": os.getenv('STYLEGUIDE_URL', ""),
                },
            })
    except SMTPException:
        messages.error(request, f"Failed to send email to {user.email}.")
days = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
ds = ['saturday', 'sunday', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday']
f = raw_input()
s = raw_input()
fday = ds.index(f)
sday = ds.index(s)
found = False
for month in xrange(1, 12):
    pos = fday
    for d in xrange(days[month]):
        pos += 1
        if pos == len(ds):
            pos = 0
    if pos == sday:
        print "YES"
        found = True
        break
if not found:
    print "NO"
/**
 * Checks online for updates. Blocking method.
 *
 * @param plugin
 *            Plugin to check for.
 * @return The update result.
 * @throws IOException
 *             If an IO error occurs.
 */
public UpdateCheckResult checkForUpdatesSync(Plugin plugin) throws IOException {
    String currentVersionEncoded = URLEncoder.encode(plugin.getDescription().getVersion(), "UTF-8");

    URL url = new URL(UPDATE_URL + "?version=" + currentVersionEncoded);
    HttpURLConnection connection = (HttpURLConnection) url.openConnection();
    connection.setRequestMethod("GET");
    connection.setRequestProperty("Content-Type", "application/json");
    UserAgent.setFor(plugin, connection);

    try (InputStream stream = connection.getInputStream()) {
        JsonObject object = jsonParser.parse(new InputStreamReader(stream, Charsets.UTF_8)).getAsJsonObject();
        return new UpdateCheckResult(object);
    } catch (IOException e) {
        throw e;
    } catch (Exception e) {
        throw new IOException(e);
    }
}
// File: gpu-simulator/gpgpu-sim/src/stream_manager.cc (from tgrogers/accel-sim-framework)
// Copyright (c) 2009-2011, <NAME>, <NAME>
// The University of British Columbia
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution. Neither the name of
// The University of British Columbia nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#include "stream_manager.h"
#include "../libcuda/gpgpu_context.h"
#include "cuda-sim/cuda-sim.h"
#include "gpgpu-sim/gpu-sim.h"
#include "gpgpusim_entrypoint.h"
unsigned CUstream_st::sm_next_stream_uid = 0;
CUstream_st::CUstream_st() {
m_pending = false;
m_uid = sm_next_stream_uid++;
pthread_mutex_init(&m_lock, NULL);
}
bool CUstream_st::empty() {
pthread_mutex_lock(&m_lock);
bool empty = m_operations.empty();
pthread_mutex_unlock(&m_lock);
return empty;
}
bool CUstream_st::busy() {
pthread_mutex_lock(&m_lock);
bool pending = m_pending;
pthread_mutex_unlock(&m_lock);
return pending;
}
void CUstream_st::synchronize() {
// called by host thread
bool done = false;
do {
pthread_mutex_lock(&m_lock);
done = m_operations.empty();
pthread_mutex_unlock(&m_lock);
} while (!done);
}
void CUstream_st::push(const stream_operation &op) {
// called by host thread
pthread_mutex_lock(&m_lock);
m_operations.push_back(op);
pthread_mutex_unlock(&m_lock);
}
void CUstream_st::record_next_done() {
// called by gpu thread
pthread_mutex_lock(&m_lock);
assert(m_pending);
m_operations.pop_front();
m_pending = false;
pthread_mutex_unlock(&m_lock);
}
stream_operation CUstream_st::next() {
// called by gpu thread
pthread_mutex_lock(&m_lock);
m_pending = true;
stream_operation result = m_operations.front();
pthread_mutex_unlock(&m_lock);
return result;
}
void CUstream_st::cancel_front() {
pthread_mutex_lock(&m_lock);
assert(m_pending);
m_pending = false;
pthread_mutex_unlock(&m_lock);
}
void CUstream_st::print(FILE *fp) {
pthread_mutex_lock(&m_lock);
fprintf(fp, "GPGPU-Sim API: stream %u has %zu operations\n", m_uid,
m_operations.size());
std::list<stream_operation>::iterator i;
unsigned n = 0;
for (i = m_operations.begin(); i != m_operations.end(); i++) {
stream_operation &op = *i;
fprintf(fp, "GPGPU-Sim API: %u : ", n++);
op.print(fp);
fprintf(fp, "\n");
}
pthread_mutex_unlock(&m_lock);
}
bool stream_operation::do_operation(gpgpu_sim *gpu) {
if (is_noop()) return true;
assert(!m_done && m_stream);
if (g_debug_execution >= 3)
printf("GPGPU-Sim API: stream %u performing ", m_stream->get_uid());
switch (m_type) {
case stream_memcpy_host_to_device:
if (g_debug_execution >= 3) printf("memcpy host-to-device\n");
gpu->memcpy_to_gpu(m_device_address_dst, m_host_address_src, m_cnt);
m_stream->record_next_done();
break;
case stream_memcpy_device_to_host:
if (g_debug_execution >= 3) printf("memcpy device-to-host\n");
gpu->memcpy_from_gpu(m_host_address_dst, m_device_address_src, m_cnt);
m_stream->record_next_done();
break;
case stream_memcpy_device_to_device:
if (g_debug_execution >= 3) printf("memcpy device-to-device\n");
gpu->memcpy_gpu_to_gpu(m_device_address_dst, m_device_address_src, m_cnt);
m_stream->record_next_done();
break;
case stream_memcpy_to_symbol:
if (g_debug_execution >= 3) printf("memcpy to symbol\n");
gpu->gpgpu_ctx->func_sim->gpgpu_ptx_sim_memcpy_symbol(
m_symbol, m_host_address_src, m_cnt, m_offset, 1, gpu);
m_stream->record_next_done();
break;
case stream_memcpy_from_symbol:
if (g_debug_execution >= 3) printf("memcpy from symbol\n");
gpu->gpgpu_ctx->func_sim->gpgpu_ptx_sim_memcpy_symbol(
m_symbol, m_host_address_dst, m_cnt, m_offset, 0, gpu);
m_stream->record_next_done();
break;
case stream_kernel_launch:
if (m_sim_mode) { // Functional Sim
if (g_debug_execution >= 3) {
printf("kernel %d: \'%s\' transfer to GPU hardware scheduler\n",
m_kernel->get_uid(), m_kernel->name().c_str());
m_kernel->print_parent_info();
}
gpu->set_cache_config(m_kernel->name());
gpu->functional_launch(m_kernel);
} else { // Performance Sim
if (gpu->can_start_kernel() && m_kernel->m_launch_latency == 0) {
if (g_debug_execution >= 3) {
printf("kernel %d: \'%s\' transfer to GPU hardware scheduler\n",
m_kernel->get_uid(), m_kernel->name().c_str());
m_kernel->print_parent_info();
}
gpu->set_cache_config(m_kernel->name());
gpu->launch(m_kernel);
} else {
if (m_kernel->m_launch_latency) m_kernel->m_launch_latency--;
if (g_debug_execution >= 3)
printf(
"kernel %d: \'%s\', latency %u not ready to transfer to GPU "
"hardware scheduler\n",
m_kernel->get_uid(), m_kernel->name().c_str(),
m_kernel->m_launch_latency);
return false;
}
}
break;
case stream_event: {
printf("event update\n");
time_t wallclock = time((time_t *)NULL);
m_event->update(gpu->gpu_tot_sim_cycle, wallclock);
m_stream->record_next_done();
} break;
case stream_wait_event:
// only allows next op to go if event is done
// otherwise stays in the stream queue
printf("stream wait event processing...\n");
if (m_event->num_updates() >= m_cnt) {
printf("stream wait event done\n");
m_stream->record_next_done();
} else {
return false;
}
break;
default:
abort();
}
m_done = true;
fflush(stdout);
return true;
}
void stream_operation::print(FILE *fp) const {
fprintf(fp, " stream operation ");
switch (m_type) {
case stream_event:
fprintf(fp, "event");
break;
case stream_kernel_launch:
fprintf(fp, "kernel");
break;
case stream_memcpy_device_to_device:
fprintf(fp, "memcpy device-to-device");
break;
case stream_memcpy_device_to_host:
fprintf(fp, "memcpy device-to-host");
break;
case stream_memcpy_host_to_device:
fprintf(fp, "memcpy host-to-device");
break;
case stream_memcpy_to_symbol:
fprintf(fp, "memcpy to symbol");
break;
case stream_memcpy_from_symbol:
fprintf(fp, "memcpy from symbol");
break;
case stream_no_op:
fprintf(fp, "no-op");
break;
}
}
stream_manager::stream_manager(gpgpu_sim *gpu, bool cuda_launch_blocking) {
m_gpu = gpu;
m_service_stream_zero = false;
m_cuda_launch_blocking = cuda_launch_blocking;
pthread_mutex_init(&m_lock, NULL);
m_last_stream = m_streams.begin();
}
bool stream_manager::operation(bool *sim) {
bool check = check_finished_kernel();
pthread_mutex_lock(&m_lock);
// if(check)m_gpu->print_stats();
stream_operation op = front();
if (!op.do_operation(m_gpu)) // not ready to execute
{
// cancel operation
if (op.is_kernel()) {
unsigned grid_uid = op.get_kernel()->get_uid();
m_grid_id_to_stream.erase(grid_uid);
}
op.get_stream()->cancel_front();
}
pthread_mutex_unlock(&m_lock);
// pthread_mutex_lock(&m_lock);
// simulate a clock cycle on the GPU
return check;
}
bool stream_manager::check_finished_kernel() {
unsigned grid_uid = m_gpu->finished_kernel();
bool check = register_finished_kernel(grid_uid);
return check;
}
bool stream_manager::register_finished_kernel(unsigned grid_uid) {
// called by gpu simulation thread
if (grid_uid > 0) {
CUstream_st *stream = m_grid_id_to_stream[grid_uid];
kernel_info_t *kernel = stream->front().get_kernel();
assert(grid_uid == kernel->get_uid());
// Jin: should check children kernels for CDP
if (kernel->is_finished()) {
// std::ofstream kernel_stat("kernel_stat.txt",
// std::ofstream::out | std::ofstream::app); kernel_stat<< "
// kernel " << grid_uid << ": " << kernel->name();
// if(kernel->get_parent())
// kernel_stat << ", parent " <<
// kernel->get_parent()->get_uid() <<
// ", launch " << kernel->launch_cycle;
// kernel_stat<< ", start " << kernel->start_cycle <<
// ", end " << kernel->end_cycle << ", retire " <<
// gpu_sim_cycle + gpu_tot_sim_cycle << "\n";
// printf("kernel %d finishes, retires from stream %d\n",
// grid_uid, stream->get_uid()); kernel_stat.flush();
// kernel_stat.close();
stream->record_next_done();
m_grid_id_to_stream.erase(grid_uid);
kernel->notify_parent_finished();
delete kernel;
return true;
}
}
return false;
}
void stream_manager::stop_all_running_kernels() {
pthread_mutex_lock(&m_lock);
// Signal m_gpu to stop all running kernels
m_gpu->stop_all_running_kernels();
// Clean up all streams waiting on running kernels
int count = 0;
while (check_finished_kernel()) {
count++;
}
// If any kernels completed, print out the current stats
if (count > 0) m_gpu->print_stats();
pthread_mutex_unlock(&m_lock);
}
stream_operation stream_manager::front() {
// called by gpu simulation thread
stream_operation result;
// if( concurrent_streams_empty() )
m_service_stream_zero = true;
if (m_service_stream_zero) {
if (!m_stream_zero.empty() && !m_stream_zero.busy()) {
result = m_stream_zero.next();
if (result.is_kernel()) {
unsigned grid_id = result.get_kernel()->get_uid();
m_grid_id_to_stream[grid_id] = &m_stream_zero;
}
} else {
m_service_stream_zero = false;
}
}
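  // Otherwise service the concurrent (non-zero) streams round-robin, resuming
  // the scan just after the stream that was serviced last (m_last_stream).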
if (!m_service_stream_zero) {
std::list<struct CUstream_st *>::iterator s = m_last_stream;
if (m_last_stream == m_streams.end()) {
s = m_streams.begin();
} else {
s++;
}
for (size_t ii = 0; ii < m_streams.size(); ii++, s++) {
if (s == m_streams.end()) {
s = m_streams.begin();
}
m_last_stream = s;
CUstream_st *stream = *s;
if (!stream->busy() && !stream->empty()) {
result = stream->next();
if (result.is_kernel()) {
unsigned grid_id = result.get_kernel()->get_uid();
m_grid_id_to_stream[grid_id] = stream;
}
break;
}
}
}
return result;
}
void stream_manager::add_stream(struct CUstream_st *stream) {
// called by host thread
pthread_mutex_lock(&m_lock);
m_streams.push_back(stream);
pthread_mutex_unlock(&m_lock);
}
void stream_manager::destroy_stream(CUstream_st *stream) {
// called by host thread
pthread_mutex_lock(&m_lock);
while (!stream->empty())
;
std::list<CUstream_st *>::iterator s;
for (s = m_streams.begin(); s != m_streams.end(); s++) {
if (*s == stream) {
m_streams.erase(s);
break;
}
}
delete stream;
m_last_stream = m_streams.begin();
pthread_mutex_unlock(&m_lock);
}
bool stream_manager::concurrent_streams_empty() {
bool result = true;
if (m_streams.empty()) return true;
// called by gpu simulation thread
std::list<struct CUstream_st *>::iterator s;
for (s = m_streams.begin(); s != m_streams.end(); ++s) {
struct CUstream_st *stream = *s;
if (!stream->empty()) {
// stream->print(stdout);
result = false;
break;
}
}
return result;
}
bool stream_manager::empty_protected() {
bool result = true;
pthread_mutex_lock(&m_lock);
if (!concurrent_streams_empty()) result = false;
if (!m_stream_zero.empty()) result = false;
pthread_mutex_unlock(&m_lock);
return result;
}
bool stream_manager::empty() {
bool result = true;
if (!concurrent_streams_empty()) result = false;
if (!m_stream_zero.empty()) result = false;
return result;
}
void stream_manager::print(FILE *fp) {
pthread_mutex_lock(&m_lock);
print_impl(fp);
pthread_mutex_unlock(&m_lock);
}
void stream_manager::print_impl(FILE *fp) {
fprintf(fp, "GPGPU-Sim API: Stream Manager State\n");
std::list<struct CUstream_st *>::iterator s;
for (s = m_streams.begin(); s != m_streams.end(); ++s) {
struct CUstream_st *stream = *s;
if (!stream->empty()) stream->print(fp);
}
if (!m_stream_zero.empty()) m_stream_zero.print(fp);
}
void stream_manager::push(stream_operation op) {
struct CUstream_st *stream = op.get_stream();
// block if stream 0 (or concurrency disabled) and pending concurrent
// operations exist
bool block = !stream || m_cuda_launch_blocking;
while (block) {
pthread_mutex_lock(&m_lock);
block = !concurrent_streams_empty();
pthread_mutex_unlock(&m_lock);
};
pthread_mutex_lock(&m_lock);
if (!m_gpu->cycle_insn_cta_max_hit()) {
// Accept the stream operation if the maximum cycle/instruction/cta counts
// are not triggered
if (stream && !m_cuda_launch_blocking) {
stream->push(op);
} else {
op.set_stream(&m_stream_zero);
m_stream_zero.push(op);
}
} else {
// Otherwise, ignore operation and continue
printf(
"GPGPU-Sim API: Maximum cycle, instruction, or CTA count hit. "
"Skipping:");
op.print(stdout);
printf("\n");
}
if (g_debug_execution >= 3) print_impl(stdout);
pthread_mutex_unlock(&m_lock);
if (m_cuda_launch_blocking || stream == NULL) {
unsigned int wait_amount = 100;
unsigned int wait_cap = 100000; // 100ms
while (!empty()) {
      // sleep to avoid hogging the CPU with a busy-wait on an empty queue;
      // the sleep time grows exponentially (capped) so we back off while staying responsive
usleep(wait_amount);
wait_amount *= 2;
if (wait_amount > wait_cap) wait_amount = wait_cap;
}
}
}
void stream_manager::pushCudaStreamWaitEventToAllStreams(CUevent_st *e,
unsigned int flags) {
std::list<CUstream_st *>::iterator s;
for (s = m_streams.begin(); s != m_streams.end(); s++) {
stream_operation op(*s, e, flags);
push(op);
}
}
|
/**
* Contains all the debugging flags.
*
* @author Maxime PIA
*/
public class DebuggingFlags implements ValueClass {
// _global
private static final String LINE_LIST_HEX_INPUT_CHAR = "g";
// input after _hexadecimal replacements
private static final String LINE_LIST_HEX_AFTER_HEX_REPL_CHAR = "h";
// _transitory phase between hex & string
private static final String LINE_LIST_HEX_AFTER_STR_REPL_CHAR = "t";
// _input
private static final String LINE_HEX_INPUT_CHAR = "i";
// _validity
private static final String LINE_VALIDITY_CHAR = "v";
// _details
private static final String LINE_VALIDITY_DETAILS_CHAR = "d";
// _non formatted
private static final String LINE_NON_FORMATTED_CHAR = "n";
// _ultimate phase before formatting
private static final String LINE_NON_FORMATTED_AFTER_STR_REPL_CHAR = "u";
// _filtered out
private static final String LINE_REJECTED_CHAR = "f";
// _encoding
private static final String LINE_LIST_ENCODING_CHAR = "e";
// encoding _score
private static final String LINE_LIST_ENCODING_VALIDITY_CHAR = "s";
// _reason for the score
private static final String LINE_LIST_ENCODING_VALIDITY_DETAILS_CHAR = "r";
// e_xcluded encodings
private static final String LINE_LIST_ENCODING_REJECTED_CHAR = "x";
// hex selections _boundaries
private static final String HEX_SELECTIONS_BOUNDS_CHAR = "b";
// hex selections _content
private static final String HEX_SELECTIONS_CONTENT_CHAR = "c";
@CommandLineValue(
value = LINE_LIST_HEX_INPUT_CHAR,
description = "Global hexadecimal inputs"
)
public static final long LINE_LIST_HEX_INPUT = 1 << 0;
@CommandLineValue(
value = LINE_LIST_HEX_AFTER_HEX_REPL_CHAR,
description = "Hexadecimal versions of the global input after "
+ "hexadecimal replacements"
)
public static final long LINE_LIST_HEX_AFTER_HEX_REPL_INPUT = 1 << 1;
@CommandLineValue(
value = LINE_LIST_HEX_AFTER_STR_REPL_CHAR,
description = "Transitory versions of the global input after "
+ "hexadecimal to string replacements"
)
public static final long LINE_LIST_HEX_AFTER_STR_REPL_INPUT = 1 << 2;
@CommandLineValue(
value = LINE_HEX_INPUT_CHAR,
description = "Hexadecimal versions of the converted lines"
)
public static final long LINE_HEX_INPUT = 1 << 3;
@CommandLineValue(
value = LINE_VALIDITY_CHAR,
description = "Validity mark of the lines"
)
public static final long LINE_VALIDITY = 1 << 4;
@CommandLineValue(
value = LINE_VALIDITY_DETAILS_CHAR,
description = "Details on the validity mark of the lines"
)
public static final long LINE_VALIDITY_DETAILS = (1 << 5) | LINE_VALIDITY;
@CommandLineValue(
value = LINE_NON_FORMATTED_CHAR,
description = "Non formatted version of the lines"
)
public static final long LINE_NON_FORMATTED = 1 << 6;
@CommandLineValue(
value = LINE_NON_FORMATTED_AFTER_STR_REPL_CHAR,
description = "Non formatted version of the lines after string "
+ "replacements"
)
public static final long LINE_NON_FORMATTED_AFTER_STR_REPL = 1 << 7;
@CommandLineValue(
value = LINE_REJECTED_CHAR,
    description = "Lines rejected due to their validity"
)
public static final long LINE_REJECTED = 1 << 8;
private static final String AUTODETECT_CONDITION =
"Encoding detection is activated";
@CommandLineValue(
value = LINE_LIST_ENCODING_CHAR,
description = "Encoding detected for the converted lines",
condition = AUTODETECT_CONDITION
)
public static final long LINE_LIST_ENCODING = 1 << 9;
@CommandLineValue(
value = LINE_LIST_ENCODING_VALIDITY_CHAR,
description =
"Validity mark of the encoding detected for the converted lines",
condition = AUTODETECT_CONDITION
)
public static final long LINE_LIST_ENCODING_VALIDITY = 1 << 10;
@CommandLineValue(
value = LINE_LIST_ENCODING_VALIDITY_DETAILS_CHAR,
description = "Details on the validity mark of the encoding detected "
+ "for the converted lines",
condition = AUTODETECT_CONDITION
)
public static final long LINE_LIST_ENCODING_VALIDITY_DETAILS =
(1 << 11) | LINE_LIST_ENCODING_VALIDITY;
@CommandLineValue(
value = LINE_LIST_ENCODING_REJECTED_CHAR,
description = "Lines converted with a rejected encoding",
condition = AUTODETECT_CONDITION
)
public static final long LINE_LIST_ENCODING_REJECTED = 1 << 12;
@CommandLineValue(
value = HEX_SELECTIONS_BOUNDS_CHAR,
description = "Selection bounds"
)
public static final long HEX_SELECTIONS_BOUNDS = 1 << 13;
@CommandLineValue(
value = HEX_SELECTIONS_CONTENT_CHAR,
description = "Hex content of a selection"
)
public static final long HEX_SELECTIONS_CONTENT = 1 << 14;
/**
* Gives the command line string values corresponding to an actual long
* value.
*
* @param flags
* The actual numeric value.
* @return The command line string values equivalent of the argument.
* @throws IllegalArgumentException
* @throws IllegalAccessException
*/
public static String longToCmdFlags(long flags)
throws IllegalArgumentException, IllegalAccessException {
List<Field> flagFields = ReflectionUtils.getAnnotatedFields(
DebuggingFlags.class,
CommandLineValue.class
);
StringBuilder sb = new StringBuilder();
for (Field flagField : flagFields) {
long fieldValue = (Long) flagField.get(null);
if ((flags & fieldValue) == fieldValue) {
sb.append(
flagField.getAnnotation(CommandLineValue.class).value()
);
}
}
return sb.toString();
}
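    // Usage sketch (illustrative, not from the original source): composite flags
    // OR-in their parent bits, so longToCmdFlags(LINE_VALIDITY_DETAILS) returns a
    // string containing both "v" and "d"; the character order follows the order in
    // which the annotated fields are enumerated by reflection.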
} |
// Logging is only available on storage accounts
func TestSetLogging(t *testing.T) {
service, delete := initServiceTest(t, "storage")
defer delete()
getResp, err := service.GetProperties(ctx, nil)
require.NoError(t, err)
getResp.Logging = &Logging{
Read: to.Ptr(true),
Write: to.Ptr(true),
Delete: to.Ptr(true),
Version: to.Ptr("1.0"),
RetentionPolicy: &RetentionPolicy{
Enabled: to.Ptr(true),
Days: to.Ptr(int32(5)),
},
}
resp, err := service.SetProperties(ctx, getResp.ServiceProperties, nil)
require.NoError(t, err)
require.NotNil(t, resp)
recording.Sleep(time.Second * 45)
received, err := service.GetProperties(ctx, nil)
require.NoError(t, err)
require.Equal(t, *getResp.Logging.Read, *received.Logging.Read)
require.Equal(t, *getResp.Logging.Write, *received.Logging.Write)
require.Equal(t, *getResp.Logging.Delete, *received.Logging.Delete)
require.Equal(t, *getResp.Logging.RetentionPolicy.Enabled, *received.Logging.RetentionPolicy.Enabled)
require.Equal(t, *getResp.Logging.RetentionPolicy.Days, *received.Logging.RetentionPolicy.Days)
} |
/**
 * Transform a Collection of {@link PostEntity} into a List of {@link Post}.
 *
 * @param postEntityCollection Collection of entities to be transformed.
 * @return List of {@link Post} built from the valid entities; invalid ones are skipped.
*/
public List<Post> transform(Collection<PostEntity> postEntityCollection) {
List<Post> postList = new ArrayList<>();
Post post;
for (PostEntity postEntity : postEntityCollection) {
post = transform(postEntity);
if (post != null) {
postList.add(post);
}
}
return postList;
} |
Hiding in plain sight
Caption (bioherms_BlighReef.jpg): North-westerly view of the Bligh Reef area off Cape York. Depths are coloured red (shallow) to blue (deep), over a depth range of about 50 metres. Bathymetry data from Australian Hydrographic Service.
James Cook University, University of Sydney and Queensland University of Technology scientists working with laser data from the Royal Australian Navy have discovered a vast reef behind the familiar Great Barrier Reef.
JCU’s Dr Robin Beaman says the high-resolution seafloor data provided by LiDAR-equipped aircraft have revealed great fields of unusual donut-shaped circular mounds, each 200-300 metres across and up to 10 metres deep at the centre.
“We’ve known about these geological structures in the northern Great Barrier Reef since the 1970s and 80s, but never before has the true nature of their shape, size and vast scale been revealed,” he said.
Dr Beaman said it was an astounding revelation. “The deeper seafloor behind the familiar coral reefs amazed us.”
The fields of circular donut-shaped rings are Halimeda bioherms, large reef-like geological structures formed by the growth of Halimeda, a common green algae composed of living calcified segments.
These form small limestone flakes on death, looking much like white cornflakes. Over time these flakes build up into large reef-like mounds, or bioherms.
Mardi McNeil from Queensland University of Technology and lead author on the new research paper, said their extent is vast.
“We’ve now mapped over 6000 square kilometres. That’s three times the previously estimated size, spanning from the Torres Strait to just north of Port Douglas. They clearly form a significant inter-reef habitat which covers an area greater than the adjacent coral reefs.”
Associate Professor Jody Webster from the University of Sydney, said the revelations about the extent of the bioherm field make questions over its vulnerability to climate change even more pressing.
“As a calcifying organism, Halimeda may be susceptible to ocean acidification and warming. Have the Halimeda bioherms been impacted, and if so to what extent?”
Dr Beaman said the discovery also opened up many other new avenues of research.
“For instance, what do the 10-20 metre thick sediments of the bioherms tell us about past climate and environmental change on the Great Barrier Reef over this 10,000 year time-scale? And, what is the finer-scale pattern of modern marine life found within and around the bioherms now that we understand their true shape?”
He said future research would require sediment coring, sub-surface geophysical surveys, and employing autonomous underwater vehicle technologies to unravel the physical, chemical and biological processes of the structures.
Link to paper:
New constraints on the spatial distribution and morphology of the Halimeda bioherms of the Great Barrier Reef, Australia:
http://link.springer.com/article/10.1007%2Fs00338-016-1492-2
High-res 3D images of the GBR bioherms can be seen at: http://ftt.jcu.edu.au/deepreef/temp/bioherms_image_caption.zip
* Laser Airborne Depth Sounder (LADS) uses red and green LiDAR technology to rapidly scan the seafloor to depths of about 50 metres, generating a dense grid of depth data points. The bathymetry data are then used by the Australian Hydrographic Service (AHS) to revise the nautical charts used by mariners.
* The Weapons Research Establishment first developed LADS airborne LiDAR bathymetry in Australia in 1972. The navy LADS Flight unit is based out of Cairns for year-round surveying of Australia’s shallow waters.
* Marine geologists previously understood the bioherms to form linear, dune-like parallel ridges and troughs, quite different to the complex seafloor now revealed with the benefit of LiDAR and multibeam swath bathymetry data collected in the past few years.
* Less than 15% of the Earth’s oceans deeper than about 200 metres have been mapped using modern surveying techniques.
Effects of estrogen and tibolone on bladder histology and estrogen receptors in rats.
BACKGROUND
Estrogen deficiency causes atrophic changes within the urogenital tract, and is associated with urinary symptoms. The purpose of this study was to investigate the effects of estrogen and tibolone on bladder histology, and the changes of estrogen receptor alpha and beta (ERalpha and beta) protein expression in the detrusor muscle.
METHODS
Forty female rats were separated into four groups of ten each. They received a sham operation (Sham), ovariectomy (Ovx), ovariectomy plus estrogen replacement (Ovx + E), or ovariectomy plus tibolone treatment (Ovx + T). After 12 weeks, each rat was anesthetized and the bladders were removed. The bladders' ultrastructure, collagen fiber (CF) to smooth muscle (SM) ratio, and ER subtypes were studied. Statistical analyses were performed using the one-way analysis of variance test.
RESULTS
Ovx resulted in significant degeneration of bladder ultrastructure; however, estrogen and tibolone reversed those changes. Ovx increased the CF/SM ratio, whereas estrogen and tibolone treatment reduced it. Two estrogen receptors (ERs) were expressed in the bladder detrusor, with ERbeta the main subtype. Ovx resulted in up-regulation of ERalpha and down-regulation of ERbeta. With estrogen and tibolone treatment, ERbeta showed a significant increase but ERalpha showed no significant difference compared with Ovx.
CONCLUSIONS
Estrogen deficiency deteriorates bladder ultrastructure and histology. Supplementary estrogen can improve bladder function, which may be due to inhibition of collagen hyperplasia and increased SM density. ERbeta has an important role in mediating estrogen function in the bladder. Tibolone has a mild estrogenic action and affects bladder function and structure to some degree.
def _simple_po_to_dict(tokens):
dsl = FUNC_TO_DSL.get(tokens[FUNCTION])
if dsl is None:
raise ValueError('invalid tokens: {}'.format(tokens))
return dsl(
namespace=tokens[NAMESPACE],
name=tokens[NAME],
) |
package properties;
import java.util.List;
import bookings.User;
public class Property {
private PropertyKind type;
private Address address;
private List<Service> services;
private Integer capacity;
private User owner;
public Property(PropertyKind type, Address address, List<Service> services, Integer capacity, User owner) {
super();
this.type = type;
this.address = address;
this.services = services;
this.capacity = capacity;
this.owner = owner;
}
public PropertyKind getType() {
return type;
}
public List<Service> getServices() {
return services;
}
public Integer getCapacity() {
return capacity;
}
public User getOwner() {
return owner;
}
public Address getAddress() {
return address;
}
}
|
import re


def profile_ap_sku(x):
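    """Classify an access-point SKU string into coarse type/generation/class buckets.

    Illustrative examples, hand-derived from the regexes below (not external data):
        profile_ap_sku("C9120AXI")    -> {"type_1": "Access Point", "type_2": "AX",  "class_type": "Medium"}
        profile_ap_sku("AIR-AP2802I") -> {"type_1": "Access Point", "type_2": "AC2", "class_type": "Medium"}
        profile_ap_sku("MR46-HW")     -> {"type_1": "Access Point", "type_2": "AX",  "class_type": "Medium"}
    """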
if bool(re.search("C91..AX", x)):
type_1 = "Access Point"
type_2 = 'AX'
if bool(re.search("(.*DNA)|(.*ADJ)", x)):
type_1 = "N/A"
type_2 = 'N/A'
class_type = 'N/A'
elif bool(re.search("C91[01].AX[IEW]", x)):
class_type = "Low"
elif bool(re.search("C9120AX[IE]", x)):
class_type = "Medium"
elif bool(re.search("C913.AX[IE]", x)):
class_type = "High"
else:
type_1 = "N/A"
type_2 = 'N/A'
class_type = 'N/A'
elif bool(re.search("AIR-[C]?AP[1-4]8..", x)):
type_1 = "Access Point"
type_2 = 'AC2'
if bool(re.search("AIR-[C]?AP18..[IEWMT]", x)):
class_type = "Low"
elif bool(re.search("AIR-[C]?AP28..[IE]", x)):
class_type = "Medium"
elif bool(re.search("AIR-[C]?AP38..[IEP]", x)):
class_type = "High"
elif bool(re.search("AIR-[C]?AP48..", x)):
class_type = "High"
else:
class_type = "N/A"
elif bool(re.search("AIR-AP15..", x)):
type_1 = "Access Point"
if bool(re.search("AIR-AP157.", x)):
type_2 = "AC1"
class_type = "Outdoor"
elif bool(re.search("AIR-AP15[46].", x)):
type_2 = "AC2"
class_type = "Outdoor"
else:
type_2 = 'N/A'
class_type = 'N/A'
elif bool(re.search("AIR-[C]?AP[1-3]7..", x)):
type_1 = "Access Point"
type_2 = 'AC1'
if bool(re.search("AIR-[C]?AP17..[IE]", x)):
class_type = "Low"
elif bool(re.search("AIR-[C]?AP27..[IE]", x)):
class_type = "Medium"
elif bool(re.search("AIR-[C]?AP37..[IE]", x)):
class_type = "High"
else:
class_type = 'N/A'
elif bool(re.search("MR.*-HW", x)):
type_1 = "Access Point"
if bool(re.search("MR[2-3][0-9]", x)):
class_type = "Low"
elif bool(re.search("MR4[0-9]", x)):
class_type = "Medium"
elif bool(re.search("MR5[0-9]", x)):
class_type = "High"
elif bool(re.search("MR[7-8][0-9]", x)):
class_type = "Outdoor"
else:
class_type = 'N/A'
if bool(re.search("MR[2-8]0", x)):
type_2 = 'AC1'
elif bool(re.search("MR[2-8][2-3]", x)):
type_2 = 'AC2'
elif bool(re.search("MR[2-8][4-6]", x)):
type_2 = 'AX'
else:
type_2 = 'N/A'
else:
type_1 = "N/A"
type_2 = 'N/A'
class_type = 'N/A'
return({"type_1":type_1, "type_2":type_2, "class_type":class_type}) |
<filename>app/src/main/java/GeneralCode/AdvanceFragment.java
package GeneralCode;
import android.app.Fragment;
import android.app.FragmentManager;
import android.os.Bundle;
import android.support.annotation.Nullable;
import android.view.LayoutInflater;
import android.view.Menu;
import android.view.MenuInflater;
import android.view.View;
import android.view.ViewGroup;
public abstract class AdvanceFragment extends Fragment implements OnBackPressListener {
FragmentManager parentFM, childFM;
int view_id, menu_id = 0;
private View fragmentView;
private AdvanceFunctions f;
@Nullable
@Override
public View onCreateView(LayoutInflater inflater, @Nullable ViewGroup container, Bundle savedInstanceState) {
view_id = setFragmentLayoutID();
fragmentView = inflater.inflate(view_id, container, false);
menu_id = setOptionsMenu();
if (menu_id != 0) {
setHasOptionsMenu(true);
}
/*f = new AdvanceFunctions(getActivity());*/
initializeViews();
afterViewsInitialized();
return fragmentView;
}
@Override
public void Reset() {
fragmentReset();
}
@Override
public boolean onBackPressed(AdvanceFragment parent) {
setFragmentManagers();
int count;
try {
count = childFM != null ? childFM.getBackStackEntryCount() : 0;
if (count > 1) {
AdvanceFragment childFrag = (AdvanceFragment) childFM.findFragmentByTag(childFM.getBackStackEntryAt(0).getName());
return childFrag.onBackPressed(this);
} else {
childFM.popBackStackImmediate();
return true;
}
} catch (NullPointerException e) {
try {
count = parentFM != null ? parentFM.getBackStackEntryCount() : 0;
if (count > 1) {
AdvanceFragment siblingFrag = (AdvanceFragment) parentFM.findFragmentByTag(parentFM.getBackStackEntryAt(0).getName());
return siblingFrag.onBackPressed(this);
} else {
parentFM.popBackStackImmediate();
parent.fragmentReset();
return true;
}
} catch (Exception e1) {
return false;
}
}
/*if (childCount == 0) {
if (siblingCount == 1) return false;
else {
int currentIndex = getFragIndexAtBAckStack(parentFM, this);
if (currentIndex < parentFM.getBackStackEntryCount() - 1) {
AdvanceFragment siblingFrag;
siblingFrag = (AdvanceFragment) parentFM.findFragmentByTag(parentFM.getBackStackEntryAt(currentIndex + 1).getName());
return siblingFrag.onBackPressed(this);
*//*if (siblingFrag.onBackPressed(this)) return true;
else {// Suppose when you want skip jumping to previous fragment
this.fragmentReset();
parentFM.popBackStackImmediate();
childFM.popBackStackImmediate();
if (parent != null)
((OnBackPressListener) parent).Reset();
return true;
}*//*
} else {
this.fragmentReset();
parentFM.popBackStackImmediate();
if (parent != null)
((OnBackPressListener) parent).Reset();
return true;
}
}
} else {
AdvanceFragment childFrag = (AdvanceFragment) childFM.findFragmentByTag(childFM.getBackStackEntryAt(0).getName());
if (!childFrag.onBackPressed(this)) {
childFM.popBackStackImmediate();
((AdvanceFragment) childFM.findFragmentByTag(childFM.getBackStackEntryAt(childFM.getBackStackEntryCount() - 1).getName())).fragmentReset();
}
return true;
}*/
}
@Override
public void onCreateOptionsMenu(Menu menu, MenuInflater inflater) {
super.onCreateOptionsMenu(menu, inflater);
inflater.inflate(menu_id, menu);
}
private int getFragIndexAtBAckStack(FragmentManager fm, AdvanceFragment fragment) {
String fragmentName = fragment.getClass().getSimpleName();
for (int i = 0; i < fm.getBackStackEntryCount(); i++) {
if (fm.getBackStackEntryAt(i).getName().equalsIgnoreCase(fragmentName)) {
return i;
}
}
return 0;
}
private void setFragmentManagers() {
this.parentFM = this.getFragmentManager();
this.childFM = this.getChildFragmentManager();
}
protected abstract int setFragmentLayoutID();
protected abstract void initializeViews();
protected abstract void afterViewsInitialized();
protected abstract int setOptionsMenu();
public abstract void fragmentReset();
protected abstract void hide();
protected <T extends View> T $(int view_id) {
return fragmentView.findViewById(view_id);
}
protected <T extends Object> T $(String resourceType, String resourceName) {
return AdvanceFunctions.getResource(getActivity(), resourceType, resourceName);
}
}
|
<reponame>usenixatc2021/SoftRefresh_Scheduling
#ifndef _ASM_ARM_MEMBLOCK_H
#define _ASM_ARM_MEMBLOCK_H
struct meminfo;
struct machine_desc;
extern void arm_memblock_init(struct meminfo *, struct machine_desc *);
phys_addr_t arm_memblock_steal(phys_addr_t size, phys_addr_t align);
#endif
|
In a move that has me scratching my head, the California legislators have seen fit to pass a law that requires smartphones to have a remote kill switch built into them. Don’t worry for the time being as this only affects smartphones sold after July 1, 2015.
Still, it leaves one wondering, what are they thinking?
While the premise here is to protect people when their phones are stolen, the possibility of this being perverted by law enforcement and criminals alike has not gone unnoticed. This reminds me of a patent that Apple received in 2012 for technology that would allow the government to remotely disable video, camera and wi-fi on an iPhone.
In the patent Apple states,
As wireless devices such as cellular telephones, pagers, personal media devices and smartphones become ubiquitous, more and more people are carrying these devices in various social and professional settings. The result is that these wireless devices can often annoy, frustrate, and even threaten people in sensitive venues. For example, cell phones with loud ringers frequently disrupt meetings, the presentation of movies, religious ceremonies, weddings, funerals, academic lectures, and test-taking environments.
This goes slightly beyond the requirements of mere law enforcement. In the summary section of the patent, they discuss the ability to remotely disable the camera, Bluetooth, wi-fi, GPS and ringers, and even to force a phone to remain in sleep mode. These are just some of the highlights outlined.
The California Senate Bill No. 962 (.pdf) states,
The bill would require that the technological solution, when enabled, be able to withstand a hard reset, as defined, and prevent reactivation of the smartphone on a wireless network except by an authorized user.
But, the law does not apply to phones that are resold in a secondary market. That being said, there is no provision to disable these features should the phone be resold. This could possibly leave the owner open to remote access from third parties. The “what ifs” are numerous.
The law doesn't spell out how the kill switch should be designed and implemented. It just stipulates that it should be there; otherwise, fines from $500 to $2,500 per violation could be applied if a retailer knowingly sells a device that does not comply with the law.
I understand the intention here, but I am puzzled as to whether this was ever thought out properly. No manufacturer will produce a phone specifically for the California market; it just would not make financial sense. That being said, smartphone manufacturers would most likely build this sort of remote kill switch capability into their devices by default. I'm certain they would be able to find a market for this in other jurisdictions.
What was that one about the road being paved with good intentions?
(Image used under CC from Marcin Wichary) |
<reponame>maybe-dragon/arma3-texture-swap
#include "dialog_parent_configs.hpp"
// Grid Macros
#define GUI_GRID_X (0)
#define GUI_GRID_Y (0)
#define GUI_GRID_W (0.025)
#define GUI_GRID_H (0.04)
#define GUI_GRID_WAbs (1)
#define GUI_GRID_HAbs (1)
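// Example: with the grid values above, a control at x = 6 * GUI_GRID_W + GUI_GRID_X
// with w = 12 * GUI_GRID_W sits at 0.15 and spans 0.30 of the 0..1 dialog
// coordinate space (assuming unscaled dialog coordinates); y and h scale by
// GUI_GRID_H = 0.04 in the same way.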
// Control ids
#define IDC_F85_TEXTURESWAP_RSCLISTBOX_TEXTURES 1500
#define IDC_F85_TEXTURESWAP_RSCLISTBOX_ANIMATIONS 1501
class F85_TextureSwap_MainDialog {
idd = 85000;
movingEnable = false;
enableSimulation = true;
class Controls {
////////////////////////////////////////////////////////
// GUI EDITOR OUTPUT START (by Fafnir, v1.063, #Dyryvo)
////////////////////////////////////////////////////////
class F85_TextureSwap_Label_Texture: F85_TextureSwap_RscText
{
idc = -1;
text = "Select Texture"; //--- ToDo: Localize;
x = 6 * GUI_GRID_W + GUI_GRID_X;
y = 3 * GUI_GRID_H + GUI_GRID_Y;
w = 12 * GUI_GRID_W;
h = 1 * GUI_GRID_H;
};
class F85_TextureSwap_ListBox_Texture: F85_TextureSwap_RscListbox
{
idc = IDC_F85_TEXTURESWAP_RSCLISTBOX_TEXTURES;
x = 6 * GUI_GRID_W + GUI_GRID_X;
y = 4 * GUI_GRID_H + GUI_GRID_Y;
w = 12 * GUI_GRID_W;
h = 13 * GUI_GRID_H;
};
class F85_TextureSwap_Label_Animation: F85_TextureSwap_RscText
{
idc = -1;
text = "Toggle Extras"; //--- ToDo: Localize;
x = 22 * GUI_GRID_W + GUI_GRID_X;
y = 3 * GUI_GRID_H + GUI_GRID_Y;
w = 12 * GUI_GRID_W;
h = 1 * GUI_GRID_H;
};
class F85_TextureSwap_ListBox_Animation: F85_TextureSwap_RscListbox
{
idc = IDC_F85_TEXTURESWAP_RSCLISTBOX_ANIMATIONS;
x = 22 * GUI_GRID_W + GUI_GRID_X;
y = 4 * GUI_GRID_H + GUI_GRID_Y;
w = 12 * GUI_GRID_W;
h = 13 * GUI_GRID_H;
style = ST_MULTI + LB_MULTI;
};
////////////////////////////////////////////////////////
// GUI EDITOR OUTPUT END
////////////////////////////////////////////////////////
};
};
|
Bob Barker says the price is too high for CBS’ Zoo.
The former The Price Is Right host and longtime animal activist sent CBS Television Studios a letter Thursday asking that the production cease using live animals in its animals-run-amuck thriller, and instead create the creatures with CGI.
“As former host of the longest-running show on CBS, I am writing to you after hearing from my friends at PETA that your network continues to allow wild animals to be used on Zoo, despite learning that, when used for entertainment, big cats and other animals are torn away from their mothers, subjected to abusive training methods, and locked inside tiny cages,” Barker wrote. “I urge you to end this exploitation and adopt a network-wide policy banning the use of wild animals in future CBS programming.”
Zoo returns on June 28. The adaptation of James Patterson’s novel was last summer’s most-watched scripted series, but the thriller has been under attack by PETA for its use of real animals behind the scenes. Previously, the production reportedly dropped plans to use a controversial animal trainer for the show.
“While I was relieved that Zoo dropped plans to use notorious animal trainer Michael Hackenberger this season after PETA notified producers that he had been caught on camera violently whipping a tiger — for which he is now facing five charges of cruelty to animals — eyewitness investigations and whistleblower reports have revealed that physical and psychological abuse of wild animals used for film and television is standard practice,” continued Barker, who hosted CBS’ iconic morning game show for 35 years until departing in 2007. “The only way to stop this cruelty is to replace the use of captive wild animals in your shows with the creative, versatile, and humane technology that exists today, such as computer-generated imagery. As demonstrated by productions such as Jon Favreau’s The Jungle Book, Darren Aronofsky’s Noah, and the upcoming David Yates film, The Legend of Tarzan, in which large numbers of animals have been created with such technology, it’s time for CBS to step up for animals and follow suit.”
PETA has taken out ads across the industry protesting the series, which first riled the organization last year after producers stated in interviews that they strived to use live animals whenever possible in the production. PETA claimed producers had assured them of precisely the opposite.
CBS responded to the letter with this statement: “We have the greatest respect for PETA and its cause. The health, safety and welfare of animals in our care during filming continues to be our highest priority.” |
Noise Performance of Niobium Nano-SQUIDs in Applied Magnetic Fields
We have fabricated two different designs of niobium dc nano-SQUIDs using focussed ion beam (FIB) lithography with loops and track widths down to 70 nm. We report on the voltage-flux and noise performance of the devices in zero field and with magnetic fields of up to 1 T applied either in-plane or perpendicular to the plane at temperatures between 5 K and 9 K. We compare the measurements with modeled estimates of the inductance of the structures and the conventional theory of noise in dc SQUIDs. |
#ifndef UTIL_H
#define UTIL_H
#define DTYPE float
#define ITYPE size_t // if changed to unsigned int, change the grid.x and gID in the CUDA kernel computation to long
#include <vector>
#include <algorithm>
#include <boost/sort/sort.hpp>
#include <iterator>
#include <unordered_map>
#include <map>
#include <boost/functional/hash.hpp>
#include <utility>
#include <bits/stdc++.h>
#include <time.h>
#include <sys/time.h>
#include <iomanip>
#include <iostream>
using namespace std;
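/* Layout sketch (as built by create_HCSR/create_TiledHCSR further below):
   fbrIdx[l] stores the coordinate of every tree node at level l (level 0 =
   slices), fbrPtr[l] stores, per node, where its children start at level l+1,
   and the last fbrPtr level points directly into inds/vals. Every fbrPtr
   level carries one trailing end pointer. */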
class Tensor{
public:
ITYPE ndims;
ITYPE *dims;
ITYPE totNnz;
ITYPE nFibers;
ITYPE *accessK;
ITYPE *fbrLikeSlcInds;
bool switchBC = false; // if true change matrix rand() to 1
std::vector<ITYPE> modeOrder;
std::vector<ITYPE> fbrCount;
ITYPE **inds;
DTYPE *vals;
std::vector<vector<ITYPE>> fbrPtr;
std::vector<vector<ITYPE>> fbrIdx;
std::vector<vector<ITYPE>> slcMapperBin;
ITYPE *nnzPerSlice;
ITYPE *fiberPerSlice;
ITYPE *nnzPerFiber;
ITYPE *denseSlcPtr;
ITYPE *partPerNnz;
ITYPE *totnnzPerPart;
unordered_map<pair<ITYPE, ITYPE>, ITYPE, boost::hash<pair<ITYPE, ITYPE>>> fbrHashTbl;
};
class TiledTensor{
public:
ITYPE ndims;
ITYPE *dims;
ITYPE totNnz;
ITYPE nFibers;
ITYPE *accessK;
ITYPE *fbrLikeSlcInds;
std::vector<ITYPE> modeOrder;
std::vector<ITYPE> fbrCount;
ITYPE **inds;
DTYPE *vals;
std::vector<vector<ITYPE>> fbrPtr;
std::vector<vector<ITYPE>> fbrIdx;
std::vector<vector<ITYPE>> slcMapperBin;
ITYPE *nnzPerSlice;
ITYPE *fiberPerSlice;
ITYPE *nnzPerFiber;
ITYPE *denseSlcPtr;
ITYPE *partPerNnz;
ITYPE *totnnzPerPart;
ITYPE *nnzInRank;
ITYPE *fbrInRank;
ITYPE *slcInRank;
unordered_map<pair<ITYPE, ITYPE>, int, boost::hash<pair<ITYPE, ITYPE>>> fbrHashTbl;
};
class HYBTensor{
public:
ITYPE ndims;
ITYPE *dims;
ITYPE totNnz;
ITYPE HCSRnnz;
ITYPE COOnnz;
ITYPE CSLnnz;
ITYPE nFibers;
ITYPE *accessK;
std::vector<ITYPE> modeOrder;
ITYPE **inds;
DTYPE *vals;
std::vector<vector<ITYPE>> fbrPtr;
std::vector<vector<ITYPE>> fbrIdx;
std::vector<vector<ITYPE>> slcMapperBin;
ITYPE **COOinds;
DTYPE *COOvals;
std::vector<ITYPE> CSLslicePtr;
std::vector<ITYPE> CSLsliceIdx;
ITYPE **CSLinds;
DTYPE *CSLvals;
std::vector<vector<ITYPE>> CSLslcMapperBin;
HYBTensor(const Tensor &X)
{
ndims = X.ndims;
dims = new ITYPE[X.ndims];
totNnz = X.totNnz;
for (int i = 0; i < ndims; ++i)
{
dims[i] = X.dims[i];
modeOrder.push_back(X.modeOrder[i]);
}
}
};
class Matrix{
public:
ITYPE nRows;
ITYPE nCols;
DTYPE *vals;
};
class semiSpTensor{
public:
ITYPE nRows;
ITYPE nCols;
DTYPE *vals;
};
class Options {
public:
ITYPE R = 32;
ITYPE mode = 0;
ITYPE warpPerSlice = 4;
ITYPE nTile = 1;
ITYPE tileSize;
ITYPE gridSize = 512;
ITYPE TBsize = 128;
ITYPE MIfbTh = 1;
ITYPE fiberPerWarp = 1;
    bool verbose = false; // if true, print extra progress/debug output
string inFileName;
string outFileName;
ITYPE nBin = 1;
std::string m0 = "012";
std::string m1 = "120";
std::string m2 = "201";
bool doCPD = false;
ITYPE cpdIters = 10;
bool natOrdering = false;
int redunMode;
ITYPE fbrThreashold = 99999999;
void print() {
std::cout << "R = " << R << '\n';
std::cout << "mode = " << mode << '\n';
std::cout << "warpPerSlice = " << warpPerSlice << '\n';
std::cout << "nTiles = " << nTile << '\n';
std::cout << "verbose = " << verbose << '\n';
// must provide input file name
if(inFileName.empty()){
std::cout << "Provide input file path. Program will exit." << std::endl;
exit(0);
}
else{
std::cout << "input file name = " << inFileName << '\n';
}
if(!outFileName.empty())
std::cout << "output file name = " << outFileName << '\n';
}
};
inline void check_opt(const Tensor &X, Options &Opt){
if(X.ndims > 4){
std::cout << "Supported tensor dimension is 3 or 4." << std::endl;
exit(0);
}
if(Opt.mode > X.ndims - 1){
std::cout << "Mode cannot be larger than tensor dimension." << std::endl;
exit(0);
}
Opt.mode = 0;
}
inline void order_tensormode(Tensor &X, const Options &Opt, const int mode){
int *sortMode = new int[X.ndims]; //sorted according to mode length
int *natMode = new int[X.ndims]; // natural ordering
bool *taken = new bool[X.ndims];
int *sortModeLen = new int[X.ndims];
for (int m = 0; m < X.ndims; ++m){
natMode[m] = (m + mode) % X.ndims;
sortModeLen[m] = X.dims[natMode[m]];
taken[m] = false;
}
if(Opt.natOrdering){
for (int i = 0; i < X.ndims; ++i)
X.modeOrder.push_back(natMode[i]);
std::cout << "Natural mode ordering " << std::endl;
}
else{
/*linear sort of dimension length*/
for (int i = 1; i < X.ndims; i++) {
for (int j =i+1; j < X.ndims; j++) {
if (sortModeLen[i] > sortModeLen[j])
std::swap(sortModeLen[i],sortModeLen[j]);
}
}
sortMode[0] = mode;
taken[mode] = true;
for (int i = 1; i < X.ndims; i++) {
for (int j = 0; j < X.ndims; j++) {
if( sortModeLen[i] == X.dims[j] && !taken[j]){
sortMode[i] = j;
taken[j] = true;
break;
}
}
}
for (int i = 0; i < X.ndims; ++i)
X.modeOrder.push_back(sortMode[i]);
}
if(Opt.verbose){
std::cout << "mode ordering: ";
for (int i = 0; i < X.ndims; ++i)
std::cout << X.modeOrder[i] << " ";
std::cout << std::endl;
}
delete[] sortMode;
delete[] natMode;
delete[] taken;
delete[] sortModeLen;
}
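/* Worked example (sketch): for dims = {100, 5000, 30} and mode = 0 with the
   default ordering, the requested mode comes first and the remaining modes
   follow in increasing dimension length, giving modeOrder = {0, 2, 1}; with
   Opt.natOrdering it is simply the rotation starting at mode, i.e. {0, 1, 2}. */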
inline int load_tensor(Tensor &X, const Options &Opt){
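    /* Input layout sketch, inferred from the parsing below (indices in the file
       are 1-based and the header spans two lines, hence totNnz = numLines - 2):
           3
           4 4 2
           1 1 1  1.0
           1 3 2  2.5
           4 2 1  0.5                                                          */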
if(Opt.verbose)
std::cout << std::endl << "Loading tensor.." << std::endl;
string filename = Opt.inFileName;
ITYPE index;
DTYPE vid=0;
ifstream fp(filename);
if(fp.fail()){
std::cout << "File " << filename << " does not exist!" << std::endl;
exit(0);
}
/*get number of line (totnnz)*/
int numLines = 0;
std::string unused;
while ( std::getline(fp, unused) )
++numLines;
X.totNnz = numLines - 2;
fp.clear(); // clear fail and eof bits
fp.seekg(0, std::ios::beg);
fp >> X.ndims;
X.dims = new ITYPE[X.ndims];
for (int i = 0; i < X.ndims; ++i)
fp >> X.dims[i];
X.inds = new ITYPE*[X.ndims];
for(int i = 0; i < X.ndims; ++i)
X.inds[i] = new ITYPE[X.totNnz];
X.vals = new DTYPE[X.totNnz];
int idxCntr = 0;
while(fp >> index) {
X.inds[0][idxCntr] = index-1;
for (int i = 1; i < X.ndims; ++i)
{
fp >> index;
X.inds[i][idxCntr] = index-1;
}
fp >> vid;
X.vals[idxCntr] = vid;
idxCntr++;
}
order_tensormode(X, Opt, Opt.mode);
return 0;
}
inline void init_tensor(Tensor *arrX, Tensor &X0, const Options &Opt, int mode){
arrX[mode].ndims = X0.ndims;
arrX[mode].dims = new ITYPE[arrX[mode].ndims];
arrX[mode].totNnz = X0.totNnz;
arrX[mode].inds = X0.inds;
arrX[mode].vals = X0.vals;
for (int i = 0; i < arrX[mode].ndims; ++i)
arrX[mode].dims[i] = X0.dims[i];
order_tensormode(arrX[mode], Opt, mode);
}
inline bool sort_pred_3D(tuple <ITYPE, ITYPE, ITYPE, DTYPE> left,
tuple <ITYPE, ITYPE, ITYPE, DTYPE> right) {
if (get<0>(left) != get<0>(right))
return (get<0>(left) < get<0>(right));
return (get<1>(left) < get<1>(right));
}
inline bool sort_pred_4D(tuple <ITYPE, ITYPE, ITYPE, ITYPE, DTYPE> left,
tuple <ITYPE, ITYPE, ITYPE, ITYPE, DTYPE> right) {
// return get<0>(left) < get<0>(right);
if (get<0>(left) != get<0>(right))
return (get<0>(left) < get<0>(right));
if (get<1>(left) != get<1>(right))
return (get<1>(left) < get<1>(right));
return (get<2>(left) < get<2>(right));
}
inline void sort_COOtensor(Tensor &X){
const ITYPE mode0 = X.modeOrder[0];
const ITYPE mode1 = X.modeOrder[1];
const ITYPE mode2 = X.modeOrder[2];
ITYPE mode3;
if(X.ndims == 4)
mode3 = X.modeOrder[3];
if(X.ndims == 3){
vector < tuple <ITYPE, ITYPE, ITYPE, DTYPE> > items;
tuple <ITYPE, ITYPE, ITYPE, DTYPE> ap;
for (long idx = 0; idx < X.totNnz; ++idx) {
ap=std::make_tuple(X.inds[mode0][idx], X.inds[mode1][idx], X.inds[mode2][idx], X.vals[idx]);
items.push_back(ap);
}
// std::sort(std::parallel::par, items.begin(), items.end(), sort_pred);
// std::sort(items.begin(), items.end(), sort_pred);
boost::sort::sample_sort(items.begin(), items.end(), sort_pred_3D);
for (long idx = 0; idx < X.totNnz; ++idx) {
X.inds[mode0][idx] = get<0>(items[idx]);
X.inds[mode1][idx] = get<1>(items[idx]);
X.inds[mode2][idx] = get<2>(items[idx]);
X.vals[idx] = get<3>(items[idx]);
}
}
else if(X.ndims == 4){
vector < tuple <ITYPE, ITYPE, ITYPE, ITYPE, DTYPE> > items;
tuple <ITYPE, ITYPE, ITYPE, ITYPE, DTYPE> ap;
for (long idx = 0; idx < X.totNnz; ++idx) {
ap=std::make_tuple(X.inds[mode0][idx], X.inds[mode1][idx], X.inds[mode2][idx], X.inds[mode3][idx], X.vals[idx]);
items.push_back(ap);
}
boost::sort::sample_sort(items.begin(), items.end(), sort_pred_4D);
for (long idx = 0; idx < X.totNnz; ++idx) {
X.inds[mode0][idx] = get<0>(items[idx]);
X.inds[mode1][idx] = get<1>(items[idx]);
X.inds[mode2][idx] = get<2>(items[idx]);
X.inds[mode3][idx] = get<3>(items[idx]);
X.vals[idx] = get<4>(items[idx]);
}
}
}
inline void sort_MI_CSF(const Tensor &X, TiledTensor *MTX, int m){
const ITYPE mode0 = MTX[m].modeOrder[0];
const ITYPE mode1 = MTX[m].modeOrder[1];
const ITYPE mode2 = MTX[m].modeOrder[2];
ITYPE mode3;
if(X.ndims == 4)
mode3 = MTX[m].modeOrder[3];
if(X.ndims == 3){
vector < tuple <ITYPE, ITYPE, ITYPE, DTYPE> > items;
tuple <ITYPE, ITYPE, ITYPE, DTYPE> ap;
for (long idx = 0; idx < MTX[m].totNnz; ++idx) {
ap=std::make_tuple(MTX[m].inds[mode0][idx], MTX[m].inds[mode1][idx], MTX[m].inds[mode2][idx], MTX[m].vals[idx]);
items.push_back(ap);
}
// sort(items.begin(), items.end(), sort_pred);
boost::sort::sample_sort(items.begin(), items.end(), sort_pred_3D);
for (long idx = 0; idx < MTX[m].totNnz; ++idx) {
MTX[m].inds[mode0][idx] = get<0>(items[idx]);
MTX[m].inds[mode1][idx] = get<1>(items[idx]);
MTX[m].inds[mode2][idx] = get<2>(items[idx]);
MTX[m].vals[idx] = get<3>(items[idx]);
}
}
else if(X.ndims == 4){
vector < tuple <ITYPE, ITYPE, ITYPE, ITYPE, DTYPE> > items;
tuple <ITYPE, ITYPE, ITYPE, ITYPE, DTYPE> ap;
for (long idx = 0; idx < MTX[m].totNnz; ++idx) {
ap=std::make_tuple(MTX[m].inds[mode0][idx], MTX[m].inds[mode1][idx], MTX[m].inds[mode2][idx], MTX[m].inds[mode3][idx], MTX[m].vals[idx]);
items.push_back(ap);
}
boost::sort::sample_sort(items.begin(), items.end(), sort_pred_4D);
for (long idx = 0; idx < MTX[m].totNnz; ++idx) {
MTX[m].inds[mode0][idx] = get<0>(items[idx]);
MTX[m].inds[mode1][idx] = get<1>(items[idx]);
MTX[m].inds[mode2][idx] = get<2>(items[idx]);
MTX[m].inds[mode3][idx] = get<3>(items[idx]);
MTX[m].vals[idx] = get<4>(items[idx]);
}
}
// std::cout << "sorted tile : " << m << std::endl;
// for (long idx = 0; idx < MTX[m].totNnz; ++idx) {
// std::cout << MTX[m].inds[0][idx] << " "
// << MTX[m].inds[1][idx] << " "
// << MTX[m].inds[2][idx] << " "
// << MTX[m].vals[idx] << std::std::endl;
// }
}
inline void print_HCSRtensor(const Tensor &X){
std::cout << "no of fibers " << X.fbrPtr[1].size() << std::endl;
ITYPE mode0 = X.modeOrder[0];
ITYPE mode1 = X.modeOrder[1];
ITYPE mode2 = X.modeOrder[2];
for(ITYPE slc = 0; slc < X.fbrIdx[0].size(); ++slc) {
for (int fbr = X.fbrPtr[0][slc]; fbr < X.fbrPtr[0][slc+1]; ++fbr){
for(ITYPE x = X.fbrPtr[1][fbr]; x < X.fbrPtr[1][fbr+1]; ++x) {
if(mode0 == 0)
std::cout << X.fbrIdx[0][slc] << " " << X.fbrIdx[1][fbr] << " " << X.inds[X.modeOrder[2]][x] << std::endl;
if(mode0 == 1)
std::cout << X.inds[X.modeOrder[2]][x] << " "<< X.fbrIdx[0][slc] <<" "<<X.fbrIdx[1][fbr] << " " <<std::endl;
if(mode0 == 2)
std::cout << X.fbrIdx[1][fbr]<<" "<< X.inds[X.modeOrder[2]][x] << " " << X.fbrIdx[0][slc] << std::endl;
}
}
}
}
inline void print_HCSRtensor_4D(const Tensor &X){
std::cout << "no of fibers " << X.fbrPtr[1].size() << std::endl;
ITYPE mode0 = X.modeOrder[0];
ITYPE mode1 = X.modeOrder[1];
ITYPE mode2 = X.modeOrder[2];
ITYPE mode3 = X.modeOrder[3];
for(ITYPE slc = 0; slc < X.fbrIdx[0].size(); ++slc) {
for (int fbrS = X.fbrPtr[0][slc]; fbrS < X.fbrPtr[0][slc+1]; ++fbrS){
for (int fbr = X.fbrPtr[1][fbrS]; fbr < X.fbrPtr[1][fbrS+1]; ++fbr){
for(ITYPE x = X.fbrPtr[2][fbr]; x < X.fbrPtr[2][fbr+1]; ++x) {
// if(mode0 == 0)
std::cout << X.fbrIdx[0][slc] << " " << X.fbrIdx[1][fbrS] << " " << X.fbrIdx[2][fbr] << " " << X.inds[3][x] << std::endl;
// if(mode0 == 1)
// std::cout << X.fbrIdx[1][fbr] << " " << X.inds[1][x] << " "<< X.fbrIdx[0][slc]; << std::endl;
// if(mode0 == 2)
// std::cout << X.inds[0][x] << " " << X.fbrIdx[0][slc]; << " " << X.fbrIdx[1][fbr]<< std::endl;
}
}
}
}
}
inline void print_TiledHCSRtensor(TiledTensor *TiledX, int tile){
std::cout << "Tile " << tile << " of Tensor X in Tiled HCSR format: " << std::endl;
const ITYPE mode0 = TiledX[tile].modeOrder[0];
const ITYPE mode1 = TiledX[tile].modeOrder[1];
const ITYPE mode2 = TiledX[tile].modeOrder[2];
if(TiledX[tile].ndims == 3){
for(ITYPE slc = 0; slc < TiledX[tile].fbrIdx[0].size(); ++slc) {
ITYPE idx0 = TiledX[tile].fbrIdx[0][slc]; //slc
int fb_st = TiledX[tile].fbrPtr[0][slc];
int fb_end = TiledX[tile].fbrPtr[0][slc+1];
// printf("slc st- end: %d %d %d \n", slc, fb_st, fb_end );
for (int fbr = fb_st; fbr < fb_end; ++fbr){
// printf("fbr %d : ", fbr );
for(ITYPE x = TiledX[tile].fbrPtr[1][fbr]; x < TiledX[tile].fbrPtr[1][fbr+1]; ++x) {
std::cout << idx0 << " " << TiledX[tile].inds[mode1][x] << " " << TiledX[tile].inds[mode2][x] << std::endl;
}
}
}
}
else if(TiledX[tile].ndims == 4){
for(ITYPE slc = 0; slc < TiledX[tile].fbrIdx[0].size(); ++slc) {
for (int fbrS = TiledX[tile].fbrPtr[0][slc]; fbrS < TiledX[tile].fbrPtr[0][slc+1]; ++fbrS){
for (int fbr = TiledX[tile].fbrPtr[1][fbrS]; fbr < TiledX[tile].fbrPtr[1][fbrS+1]; ++fbr){
for(ITYPE x = TiledX[tile].fbrPtr[2][fbr]; x < TiledX[tile].fbrPtr[2][fbr+1]; ++x) {
// if(mode0 == 0)
std::cout << TiledX[tile].fbrLikeSlcInds[fbrS] << " " << TiledX[tile].fbrIdx[1][fbrS] << " " << TiledX[tile].fbrIdx[2][fbr] << " " << TiledX[tile].inds[3][x] << std::endl;
// if(mode0 == 1)
// std::cout << X.fbrIdx[1][fbr] << " " << X.inds[1][x] << " "<< X.fbrIdx[0][slc]; << std::endl;
// if(mode0 == 2)
// std::cout << X.inds[0][x] << " " << X.fbrIdx[0][slc]; << " " << X.fbrIdx[1][fbr]<< std::endl;
}
}
}
}
}
}
inline void create_HCSR(Tensor &X, const Options &Opt){
ITYPE fbrThreashold = Opt.fbrThreashold;
    fbrThreashold = 99999999;  // effectively disables fiber splitting by nnz count (overrides Opt.fbrThreashold)
for (int i = 0; i < X.ndims - 1; ++i){
X.fbrPtr.push_back(std::vector<ITYPE>());
X.fbrIdx.push_back(std::vector<ITYPE>());
}
std::vector<ITYPE> prevId(X.ndims-1);
std::vector<ITYPE> fbrId(X.ndims-1);
for (int i = 0; i < X.ndims-1; ++i){
prevId[i] = X.inds[X.modeOrder[i]][0];
X.fbrPtr[i].push_back(0);
X.fbrIdx[i].push_back(prevId[i]);
X.fbrPtr[i].reserve(X.totNnz);
X.fbrIdx[i].reserve(X.totNnz);
}
int idx = 1 ;
while(idx < X.totNnz) {
for (int i = 0; i < X.ndims-1; ++i)
fbrId[i] = X.inds[X.modeOrder[i]][idx];
ITYPE fiberNnz = 1;
bool sameFbr = true;
for (int i = 0; i < X.ndims-1; ++i) {
if(fbrId[i] != prevId[i])
sameFbr = false;
}
        /* absorb all consecutive nonzeros that fall in the same fiber */
while( sameFbr && idx < X.totNnz && fiberNnz < fbrThreashold){
++idx;
fiberNnz++;
for (int i = 0; i < X.ndims-1; ++i) {
fbrId[i] = X.inds[X.modeOrder[i]][idx];
if(fbrId[i] != prevId[i])
sameFbr = false;
}
}
if(idx == X.totNnz)
break;
/* X.ndims-2 is the last fiber ptr. Out of prev while loop means it is a new fiber. */
X.fbrPtr[X.ndims-2].push_back(idx);
X.fbrIdx[X.ndims-2].push_back(fbrId[X.ndims-2]);
/* populating slice ptr and higher ptrs */
for (int i = X.ndims - 3; i > -1 ; --i) {
/* each dimension checks whether all parent/previous dimensions are in same slice/fiber/... */
bool diffFbr = false;
int iDim = i;
while(iDim > -1){
if( fbrId[iDim] != prevId[iDim]) {//not else ..not become this in loop
diffFbr = true;
}
iDim--;
}
if(diffFbr){
X.fbrIdx[i].push_back(fbrId[i]);
X.fbrPtr[i].push_back((ITYPE)(X.fbrPtr[i+1].size()) - 1);
}
}
for (int i = 0; i < X.ndims-1; ++i)
prevId[i] = fbrId[i];
++idx;
fiberNnz = 1;
}
X.fbrPtr[X.ndims-2].push_back(idx);
X.fbrIdx[X.ndims-2].push_back(fbrId[X.ndims-2]);
for (int i = X.ndims - 3; i > -1 ; --i)
X.fbrPtr[i].push_back((ITYPE)(X.fbrPtr[i+1].size() - 1 ));
X.nFibers = X.fbrPtr[1].size();
// for (int i =0; i < 2 ;i++)
// X.inds[X.modeOrder[i]].resize(0);
}
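/* Worked example (sketch): for a 3-D tensor sorted in mode order {0,1,2} with
   nonzeros (0,0,0) (0,0,1) (0,1,0) (1,0,0), the loop above yields
       fbrIdx[0] = {0, 1}          fbrPtr[0] = {0, 2, 3}
       fbrIdx[1] = {0, 1, 0, 0}    fbrPtr[1] = {0, 2, 3, 4}
   i.e. slice 0 owns fibers 0-1 and slice 1 owns fiber 2, fbrPtr[1][f] ..
   fbrPtr[1][f+1] delimits fiber f's nonzeros in inds/vals, and each level ends
   with a closing pointer (the final fbrIdx[1] entry is padding). */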
inline void prefix_sum(ITYPE *x, ITYPE *y, int n){
    /* Hillis-Steele style inclusive scan, done in ceil(log2(n)) doubling passes;
       x is updated in place (y is scratch). */
    const int passes = (n > 1) ? (int)ceil(log2((double)n)) : 0;
    for (int i = 0; i < passes; i++){
        for (int j = 0; j <= n-1; j++) {
            y[j] = x[j];
            if (j >= (powf(2,i))){
                int t = powf(2,i);
                y[j] += x[j-t];
            }
        }
        for (int j = 0; j <= n-1; j++){
            x[j] = y[j];
        }
    }
}
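/* Quick sanity check of the scan above, hand-computed:
   x = {1, 2, 3, 4}  ->  x ends up as {1, 3, 6, 10} (inclusive prefix sum). */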
inline void create_TiledHCSR(TiledTensor *TiledX, const Options &Opt, int tile){
ITYPE fbrThreashold = Opt.fbrThreashold;
ITYPE fbrSThreshold = 999999999;
    fbrSThreshold = 128;  // cap nonzeros per super-fiber (fbrS level) when splitting 4-D tensors
for (int i = 0; i < TiledX[tile].ndims - 1; ++i){
TiledX[tile].fbrPtr.push_back(std::vector<ITYPE>());
TiledX[tile].fbrIdx.push_back(std::vector<ITYPE>());
}
ITYPE mode0 = TiledX[tile].modeOrder[0];
ITYPE mode1 = TiledX[tile].modeOrder[1];
ITYPE mode2 = TiledX[tile].modeOrder[2];
// ITYPE mode3 = TiledX[tile].modeOrder[3];
std::vector<ITYPE> prevId(TiledX[tile].ndims-1);
std::vector<ITYPE> fbrId(TiledX[tile].ndims-1);
for (int i = 0; i < TiledX[tile].ndims-1; ++i){
prevId[i] = TiledX[tile].inds[TiledX[tile].modeOrder[i]][0];
TiledX[tile].fbrPtr[i].push_back(0);
TiledX[tile].fbrIdx[i].push_back(prevId[i]);
}
int idx = 1 ;
ITYPE fiberSNnz = 1;
while(idx < TiledX[tile].totNnz) {
for (int i = 0; i < TiledX[tile].ndims-1; ++i)
fbrId[i] = TiledX[tile].inds[TiledX[tile].modeOrder[i]][idx];
ITYPE fiberNnz = 1;
bool sameFbr = true;
for (int i = 0; i < TiledX[tile].ndims-1; ++i) {
if(fbrId[i] != prevId[i])
sameFbr = false;
}
        /* absorb all consecutive nonzeros that fall in the same fiber */
while( sameFbr && idx < TiledX[tile].totNnz && fiberNnz < fbrThreashold){
++idx;
fiberNnz++;
fiberSNnz++;
for (int i = 0; i < TiledX[tile].ndims-1; ++i) {
fbrId[i] = TiledX[tile].inds[TiledX[tile].modeOrder[i]][idx];
if(fbrId[i] != prevId[i])
sameFbr = false;
}
}
if(idx == TiledX[tile].totNnz)
break;
/* TiledX[tile].ndims-2 is the last fiber ptr. Out of prev while loop means it is a new fiber. */
TiledX[tile].fbrPtr[TiledX[tile].ndims-2].push_back(idx);
TiledX[tile].fbrIdx[TiledX[tile].ndims-2].push_back(fbrId[TiledX[tile].ndims-2]);
/* populating slice ptr and higher ptrs */
for (int i = TiledX[tile].ndims - 3; i > -1 ; --i) {
/* each dimension checks whether all parent/previous dimensions are in same slice/fiber/... */
bool diffFbr = false;
int iDim = i;
while(iDim > -1){
if( fbrId[iDim] != prevId[iDim]) {//not else ..not become this in loop
diffFbr = true;
}
/*splitting fbrS for 4D */
else if( TiledX[tile].ndims == 4 && iDim == 1 && fiberSNnz > fbrSThreshold){
diffFbr = true;
}
iDim--;
}
if(diffFbr){
if(TiledX[tile].ndims == 4 && i == 1)
fiberSNnz = 1;
TiledX[tile].fbrIdx[i].push_back(fbrId[i]);
TiledX[tile].fbrPtr[i].push_back((ITYPE)(TiledX[tile].fbrPtr[i+1].size()) - 1);
}
}
for (int i = 0; i < TiledX[tile].ndims-1; ++i)
prevId[i] = fbrId[i];
++idx;
fiberSNnz++;
fiberNnz = 1;
}
TiledX[tile].fbrPtr[TiledX[tile].ndims-2].push_back(idx);
TiledX[tile].fbrIdx[TiledX[tile].ndims-2].push_back(fbrId[TiledX[tile].ndims-2]);
for (int i = TiledX[tile].ndims - 3; i > -1 ; --i)
TiledX[tile].fbrPtr[i].push_back((ITYPE)(TiledX[tile].fbrPtr[i+1].size() - 1 ));
TiledX[tile].nFibers = TiledX[tile].fbrPtr[1].size();
// std::cout << tile << " nnz: " << TiledX[tile].totNnz << " nslices: " << TiledX[tile].fbrPtr[0].size() << " nFibers: " << TiledX[tile].nFibers << std::endl;
// if(tile == TiledX[tile].ndims - 1){
// if(TiledX[tile].totNnz){
// int totslc = TiledX[0].fbrPtr[0].size() + TiledX[1].fbrPtr[0].size() +TiledX[2].fbrPtr[0].size();
// int totFbr = TiledX[0].fbrPtr[1].size() + TiledX[1].fbrPtr[1].size() +TiledX[2].fbrPtr[1].size();
// std::cout << "Total slice: " << totslc << " " << totFbr << std::endl;
// }
// }
}
inline void create_fbrLikeSlcInds(Tensor &X, const Options &Opt){
X.fbrLikeSlcInds = (ITYPE *)malloc( X.nFibers * sizeof(ITYPE));
for(ITYPE slc = 0; slc < X.fbrIdx[0].size(); ++slc) {
for (int fbr = X.fbrPtr[0][slc]; fbr < X.fbrPtr[0][slc+1]; ++fbr){
X.fbrLikeSlcInds[fbr] = X.fbrIdx[0][slc] ;
}
}
}
inline void create_fbrLikeSlcInds(TiledTensor *TiledX, int mode){
TiledX[mode].fbrLikeSlcInds = (ITYPE *)malloc( TiledX[mode].nFibers * sizeof(ITYPE));
for(ITYPE slc = 0; slc < TiledX[mode].fbrIdx[0].size(); ++slc) {
for (int fbr = TiledX[mode].fbrPtr[0][slc]; fbr < TiledX[mode].fbrPtr[0][slc+1]; ++fbr){
TiledX[mode].fbrLikeSlcInds[fbr] = TiledX[mode].fbrIdx[0][slc] ;
}
}
}
inline int populate_paritions(Tensor &X, TiledTensor *MTX){
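    /* Scatter every nonzero of X into the per-mode partition recorded in
       X.partPerNnz: MTX[m] is allocated from the tallied X.totnnzPerPart[m]
       and receives its nonzeros in their original relative order. */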
    // avoid push_back by preallocating each partition from totnnzPerPart
ITYPE *nnzCntr = new ITYPE[X.ndims];
memset(nnzCntr, 0, X.ndims * sizeof(ITYPE));
int mode;
for (int idx = 0; idx < X.totNnz; ++idx){
mode = X.partPerNnz[idx];
X.totnnzPerPart[mode]++;
}
for(int i = 0; i < X.ndims; ++i){
MTX[i].inds = new ITYPE*[X.ndims];
MTX[i].totNnz = X.totnnzPerPart[i];
for(int m = 0; m < X.ndims; ++m){
MTX[i].inds[m] = new ITYPE[X.totnnzPerPart[i]];
}
MTX[i].vals = new DTYPE[X.totnnzPerPart[i]];
}
for (int idx = 0; idx < X.totNnz; ++idx){
int mode = X.partPerNnz[idx];
for (int i = 0; i < X.ndims; ++i) {
MTX[mode].inds[i][nnzCntr[mode]] = X.inds[i][idx];
}
MTX[mode].vals[nnzCntr[mode]] = X.vals[idx];
nnzCntr[mode]++;
}
delete[] nnzCntr;
return 0;
}
inline int binarySearch1(ITYPE *arr, ITYPE left, ITYPE right, ITYPE value) {
while (left <= right) {
// int middle = (left + right) / 2;
int middle = ((unsigned int)left+(unsigned int)right) >> 1;
if (arr[middle] == value)
return middle;
else if (arr[middle] > value)
right = middle - 1;
else
left = middle + 1;
}
return -1;
}
inline int binarySearch(ITYPE *arr, ITYPE l, ITYPE r, ITYPE x) {
if (r >= l) {
// int mid = ((unsigned int)left+(unsigned int)right) >> 1;
// ITYPE mid = l + (r - l) / 2;
unsigned int mid = ((unsigned int)l + (unsigned int)r) >> 1;
if (arr[mid] == x)
return mid;
if (arr[mid] > x)
return binarySearch(arr, l, mid - 1, x);
return binarySearch(arr, mid + 1, r, x);
}
return -1;
}
inline int maxOf3( int a, int b, int c )
{
int max = ( a < b ) ? b : a;
return ( ( max < c ) ? c : max );
}
inline void mm_partition_reuseBased(Tensor *arrX, Tensor &X, TiledTensor *MTX, Options & Opt){
X.partPerNnz = new ITYPE[X.totNnz];
memset(X.partPerNnz, 0, X.totNnz * sizeof(ITYPE));
X.totnnzPerPart = new ITYPE[X.ndims];
memset(X.totnnzPerPart, 0, X.ndims * sizeof(ITYPE));
for (int m = 0; m < arrX[0].ndims; ++m){
if(m != Opt.redunMode){
int sliceMode=arrX[m].modeOrder[0];
int fiberMode=arrX[m].modeOrder[1];
arrX[m].nnzPerFiber = new ITYPE[arrX[m].nFibers];
memset(arrX[m].nnzPerFiber, 0, arrX[m].nFibers * sizeof(ITYPE));
arrX[m].nnzPerSlice = new ITYPE[arrX[m].dims[sliceMode]];
memset(arrX[m].nnzPerSlice, 0, arrX[m].dims[sliceMode] * sizeof(ITYPE));
arrX[m].denseSlcPtr = (ITYPE*)malloc( (arrX[m].dims[sliceMode]+1) * sizeof(ITYPE)); //new ITYPE[arrX[m].dims[sliceMode]];
memset(arrX[m].denseSlcPtr, 0, (arrX[m].dims[sliceMode] + 1 ) * sizeof(ITYPE));
}
}
    /* Create dense slice pointers so a nonzero can index its slice directly;
       fibers, by contrast, still require scanning all fibers within a slice. */
for (int m = 0; m < arrX[0].ndims; ++m){
// if(m == Opt.redunMode) continue;
if(m != Opt.redunMode){
{
for(ITYPE slc = 0; slc < arrX[m].fbrIdx[0].size(); ++slc) {
arrX[m].denseSlcPtr[arrX[m].fbrIdx[0][slc]] = arrX[m].fbrPtr[0][slc];
if(slc == arrX[m].fbrIdx[0].size()-1)
arrX[m].denseSlcPtr[arrX[m].fbrIdx[0][slc]+1] = arrX[m].fbrPtr[0][slc];
else
arrX[m].denseSlcPtr[arrX[m].fbrIdx[0][slc]+1] = arrX[m].fbrPtr[0][slc+1];
/* Populate nnz per fiber and nnz per slice */
for (int fbr = arrX[m].fbrPtr[0][slc]; fbr < arrX[m].fbrPtr[0][slc+1]; ++fbr){
if(X.ndims == 3){
arrX[m].nnzPerFiber[fbr] = arrX[m].fbrPtr[1][fbr+1] - arrX[m].fbrPtr[1][fbr];
arrX[m].nnzPerSlice[arrX[m].fbrIdx[0][slc]] += arrX[m].nnzPerFiber[fbr];
}
else if(X.ndims == 4){
for (int fbrIn = arrX[m].fbrPtr[1][fbr]; fbrIn < arrX[m].fbrPtr[1][fbr+1]; ++fbrIn)
arrX[m].nnzPerFiber[fbr] += arrX[m].fbrPtr[2][fbrIn+1] - arrX[m].fbrPtr[2][fbrIn];
arrX[m].nnzPerSlice[arrX[m].fbrIdx[0][slc]] += arrX[m].nnzPerFiber[fbr];
}
}
}
}
}
}
//int threshold = ( X.totNnz / X.dims[0] + X.totNnz / X.dims[1] + X.totNnz / X.dims[2]) / 3;
//int thNnzInTile = X.totNnz*1;
/* initialize MICSF tiles */
int mode = 0;
for (int m = 0; m < X.ndims; ++m){
MTX[m].ndims = X.ndims;
MTX[m].dims = new ITYPE[MTX[m].ndims];
        MTX[m].totNnz = 0;
for (int i = 0; i < X.ndims; ++i){
MTX[m].modeOrder.push_back(arrX[m].modeOrder[i]);
MTX[m].dims[i] = X.dims[i];
}
}
/* Populate with nnz for each slice for each mode */
//ITYPE mode0 = 0;//std::min(X.dims[0], X.dims[1], X.dims[2]);
//ITYPE mode1 = 1;//X.modeOrder[1];
//ITYPE mode2 = 2;//X.modeOrder[2];
//ITYPE mode3 = 3;
//not mode sorted
int shortestMode = ( (X.dims[X.modeOrder[0]] <= X.dims[X.modeOrder[1]]) ? X.modeOrder[0] : X.modeOrder[1]) ;
bool sameFm0m1 = false, sameFm0m2 = false, sameFm1m2 = false, sameFm0m3 = false,
sameFm1m3 = false, sameFm2m3 = false;
int fbTh = Opt.MIfbTh;
//int slTh = 1,
int shortMode = 0;
int longMode = -1;
for (int m = 0; m < X.ndims; ++m){
if(m == 1){
if (arrX[m].modeOrder[1] == arrX[m-1].modeOrder[0] && arrX[m].modeOrder[0] == arrX[m-1].modeOrder[1]){
sameFm0m1 = true;
shortMode = (arrX[m].dims[0] <= arrX[m].dims[1] ? 0 : 1);
longMode = (arrX[m].dims[0] <= arrX[m].dims[1] ? 1 : 0);
}
}
else if(m == 2){
if(arrX[m].modeOrder[1] == arrX[m-2].modeOrder[0] && arrX[m].modeOrder[0] == arrX[m-2].modeOrder[1]){
sameFm0m2 = true;
shortMode = (arrX[m].dims[0] <= arrX[m].dims[2] ? 0 : 2);
longMode = (arrX[m].dims[0] <= arrX[m].dims[2] ? 2 : 0);
}
else if ( arrX[m].modeOrder[1] == arrX[m-1].modeOrder[0] && arrX[m].modeOrder[0] == arrX[m-1].modeOrder[1]){
sameFm1m2 = true;
shortMode = (arrX[m].dims[1] <= arrX[m].dims[2] ? 1 : 2);
longMode = (arrX[m].dims[1] <= arrX[m].dims[2] ? 2 : 1);
}
}
else if(m == 3){
if(arrX[m].modeOrder[1] == arrX[m-3].modeOrder[0] && arrX[m].modeOrder[0] == arrX[m-3].modeOrder[1]){
sameFm0m3 = true;
shortMode = (arrX[m].dims[0] <= arrX[m].dims[3] ? 0 : 3);
longMode = (arrX[m].dims[0] <= arrX[m].dims[3] ? 3 : 0);
}
else if ( arrX[m].modeOrder[1] == arrX[m-2].modeOrder[0] && arrX[m].modeOrder[0] == arrX[m-2].modeOrder[1]){
sameFm1m3 = true;
shortMode = (arrX[m].dims[1] <= arrX[m].dims[3] ? 1 : 3);
longMode = (arrX[m].dims[1] <= arrX[m].dims[3] ? 3 : 1);
}
else if ( arrX[m].modeOrder[1] == arrX[m-1].modeOrder[0] && arrX[m].modeOrder[0] == arrX[m-1].modeOrder[1]){
sameFm2m3 = true;
shortMode = (arrX[m].dims[2] <= arrX[m].dims[3] ? 2 : 3);
longMode = (arrX[m].dims[2] <= arrX[m].dims[3] ? 3 : 2);
}
}
}
bool casePr = false;
    /******** Process NNZ ********/
ITYPE *fbrNnz = new ITYPE[X.ndims];
ITYPE *fbrNo = new ITYPE[X.ndims];
ITYPE *curIdx = new ITYPE[X.ndims];
ITYPE *sliceNnz = new ITYPE[X.ndims];
ITYPE tmpSlc;
int nonSelMode ;
for (int idx = 0; idx < X.totNnz; ++idx){
bool modeDone = false;
for (int m = 0; m < X.ndims; ++m)
curIdx[m] = X.inds[m][idx];
/*Finding fiber nnz*/
for (int m = 0; m < X.ndims; ++m){
int nextMode = arrX[m].modeOrder[1];
//change to sameFm*m*
if((m == 1 && sameFm0m1) || (m == 2 && sameFm1m2) || (m == 3 && sameFm2m3)){
fbrNnz[m] = fbrNnz[m - 1];
fbrNo[m] = 99999;//curIdx[arrX[m].modeOrder[1]];
}
else if((m == 2 && sameFm0m2) || (m == 3 && sameFm1m3)){
fbrNnz[m] = fbrNnz[m - 2];
fbrNo[m] = 99999;//curIdx[arrX[m].modeOrder[1]];
}
else if(m == 3 && sameFm0m3){
fbrNnz[m] = fbrNnz[m - 3];
fbrNo[m] = 99999;//curIdx[arrX[m].modeOrder[1]];
}
else{
ITYPE result, tmp;
ITYPE idx_j = curIdx[arrX[m].modeOrder[1]];
tmpSlc = curIdx[m];
/*binary search*/
{
int n = arrX[m].denseSlcPtr[tmpSlc+1] - arrX[m].denseSlcPtr[tmpSlc];//sizeof(arr) / sizeof(arr[0]);
ITYPE fbr = arrX[m].denseSlcPtr[tmpSlc];
result = binarySearch1(&(arrX[m].fbrIdx[1][fbr]), 0, n, idx_j);
tmp = arrX[m].nnzPerFiber[result+fbr];
fbrNo[m] = result+fbr;
fbrNnz[m] = tmp;
}
}
}
if(X.ndims == 3){
//changing to > = from >
if ( fbrNnz[0] >= fbTh * std::max(fbrNnz[1] , fbrNnz[2]) && !modeDone) {
modeDone = true;
if(sameFm0m1 || sameFm0m2 || sameFm0m3){
mode = shortMode;
}
else{
mode = 0;
}
}
else if ( fbrNnz[1] >= fbTh * std::max(fbrNnz[0] , fbrNnz[2]) && !modeDone) {
modeDone = true;
if(sameFm1m2 || sameFm1m3)
mode = shortMode;
else
mode = 1;
}
else if ( fbrNnz[2] >= fbTh * std::max(fbrNnz[0] , fbrNnz[1]) && !modeDone) {
modeDone = true;
if(sameFm2m3)
mode = shortMode;
else
mode = 2;
}
}
else if(X.ndims == 4){
if ( fbrNnz[0] >= fbTh * maxOf3(fbrNnz[1] , fbrNnz[2], fbrNnz[3]) && !modeDone) {
modeDone = true;
if(sameFm0m1 || sameFm0m2 || sameFm0m3)
mode = shortMode;
else
mode = 0;
}
else if ( fbrNnz[1] >= fbTh * maxOf3(fbrNnz[0] , fbrNnz[2], fbrNnz[3]) && !modeDone) {
modeDone = true;
if(sameFm1m2 || sameFm1m3)
mode = shortMode;
else
mode = 1;
}
else if ( fbrNnz[2] >= fbTh * maxOf3(fbrNnz[0] , fbrNnz[1], fbrNnz[3]) && !modeDone) {
modeDone = true;
if(sameFm2m3)
mode = shortMode;
else
mode = 2;
}
else if ( fbrNnz[3] >= fbTh * maxOf3(fbrNnz[0] , fbrNnz[1], fbrNnz[2]) && !modeDone) {
modeDone = true;
mode = 3;
}
}
// slcNnzPerParti[mode][curIdx[mode]]++;
if(!modeDone)
mode = shortestMode;//mode = -1;
//fr_m
// if(mode == 1)
// nonSelMode = 0;
// else nonSelMode = 1;
//nell-2
for (int i = 0; i < X.ndims; ++i) {
if(mode == shortMode){
if( i != shortMode && i != longMode){
nonSelMode = i;
arrX[nonSelMode].nnzPerFiber[fbrNo[nonSelMode]]--;
}
}
else{
if(i != Opt.redunMode && i != mode){
nonSelMode = i;
arrX[nonSelMode].nnzPerFiber[fbrNo[nonSelMode]]--;
}
}
}
// if(mode == 1)
// nonSelMode = 2;
// else if(mode == 2)
// nonSelMode = 0;
// arrX[nonSelMode].nnzPerFiber[fbrNo[nonSelMode]]--;
        // mode = 2;
/*populate new partitions*/
if(mode > -1){
X.partPerNnz[idx] = mode;
}
if(casePr)
std::cout << "selected mode: " << mode << std::endl;
}
delete[] fbrNnz;
delete[] fbrNo;
delete[] curIdx;
delete[] sliceNnz;
}
// more detailed check
inline void create_mats(const Tensor &X, Matrix *U, const Options &Opt, bool ata){
ITYPE mode;
ITYPE R = Opt.R;
for (int m = 0; m < X.ndims; ++m){
mode = X.modeOrder[m];
U[mode].nRows = X.dims[mode];
U[mode].nCols = R;
if(ata)
U[mode].nRows = U[mode].nCols;
U[mode].vals = (DTYPE*)malloc(U[mode].nRows * U[mode].nCols * sizeof(DTYPE));
}
}
// jli added
inline DTYPE RandomValue(void)
{
DTYPE v = 3.0 * ((DTYPE) rand() / (DTYPE) RAND_MAX);
if(rand() % 2 == 0) {
v *= -1;
}
return v;
}
inline void randomize_mats(const Tensor &X, Matrix *U, const Options &Opt){
ITYPE mode;
for (int m = 0; m < X.ndims; ++m){
mode = X.modeOrder[m];
srand48(123L);
for(long r = 0; r < U[mode].nRows; ++r){
for(long c = 0; c < U[mode].nCols; ++c){ // or u[mode].nCols
if(Opt.doCPD)
U[mode].vals[r * U[mode].nCols + c] = RandomValue(); //mode + .5;//1.5 * (mode+1);;// .1 * drand48(); //1 ;//; //
else
U[mode].vals[r * U[mode].nCols + c] = mode + .5;//1.5
}
}
}
}
inline void zero_mat(const Tensor &X, Matrix *U, ITYPE mode){
for(long r = 0; r < U[mode].nRows; ++r){
for(long c = 0; c < U[mode].nCols; ++c) // or u[mode].nCols
U[mode].vals[r * U[mode].nCols +c] = 0;
}
}
inline void print_matrix(Matrix *U, ITYPE mode){
std::cout << U[mode].nRows << " x " << U[mode].nCols << " matrix" << std::endl;
std::cout << std::fixed;
for (int i = 0; i < 3; ++i)
// for (int i = U[mode].nRows-5; i < U[mode].nRows; ++i)
{
// for (int j = 0; j < U[mode].nCols; ++j)
for (int j = 0; j < 3; ++j)
{
std::cout << std::setprecision(2) << U[mode].vals[i * U[mode].nCols + j] << "\t" ;
}
std::cout << std::endl;
}
}
inline void write_output(Matrix *U, ITYPE mode, string outFile){
ofstream fp(outFile);
fp << U[mode].nRows << " x " << U[mode].nCols << " matrix" << std::endl;
fp << std::fixed;
for (int i = 0; i < U[mode].nRows; ++i)
{
for (int j = 0; j < U[mode].nCols; ++j)
{
fp << std::setprecision(2) << U[mode].vals[i * U[mode].nCols + j] << "\t" ;
}
fp << std::endl;
}
}
inline void correctness_check(DTYPE *out, DTYPE *COOout, int nr, int nc){
long mismatch = 0;
DTYPE maxDiff = 0;
DTYPE precision = 0.1;
std::cout << std::fixed;
for (int i = 0; i < nr; ++i){
for (int j = 0; j < nc; ++j){
DTYPE diff = abs(out[i * nc + j] - COOout[i * nc + j]);
if( diff > precision){
if(diff > maxDiff)
maxDiff = diff;
if(mismatch < 5 && j == 0)
std::cout << "mismatch at (" << i <<"," << j <<") got: " << out[i * nc +j] << " exp: " << COOout[i * nc +j] << ". "<< std::endl;
mismatch++;
}
}
}
if(mismatch == 0)
std::cout << "PASS!" << std::endl;
else{
std::cout << mismatch <<" mismatches found at " << precision << " precision. " << std::endl;
std::cout << "Maximum diff " << maxDiff << ". "<<std::endl;
}
}
inline double seconds(){
struct timeval tp;
gettimeofday(&tp, NULL);
return ((double)tp.tv_sec + (double)tp.tv_usec * 1.e-6);
}
inline void print_help_and_exit() {
printf("options:\n\
-R rank/feature : set the rank (default 32)\n\
-m mode : set the mode of MTTKRP (default 0)\n\
  -w warp per slice: set number of WARPs assigned per slice (default 4)\n\
  -i input file name: e.g., ../dataset/delicious.tns \n\
-o output file name: optional dump\n");
exit(1);
}
inline Options parse_cmd_options(int argc, char **argv) {
Options param;
int i;
//handle options
for (i = 1; i < argc; i++) {
if (argv[i][0] != '-')
break;
if (++i >= argc){
print_help_and_exit();
}
switch (argv[i - 1][1]) {
case 'R':
param.R = atoi(argv[i]);
break;
case 'm':
param.mode = atoi(argv[i]);
break;
case 'w':
param.warpPerSlice = atoi(argv[i]);
break;
case 'l':
param.nTile = atoi(argv[i]);
break;
case 'f':
param.fbrThreashold = atoi(argv[i]);
break;
case 'b':
param.TBsize = atoi(argv[i]);
break;
case 's':
param.fiberPerWarp = atoi(argv[i]);
break;
case 'h':
param.MIfbTh = atoi(argv[i]);
break;
case 'g':
param.gridSize = atoi(argv[i]);
break;
case 'v':
if(atoi(argv[i]) == 1)
param.verbose = true;
else
param.verbose = false;
break;
case 'i':
param.inFileName = argv[i];
break;
case 'o':
param.outFileName = argv[i];
break;
case 'p':
param.m0 = argv[i];
break;
case 'q':
param.m1 = argv[i];
break;
case 'r':
param.m2 = argv[i];
break;
default:
fprintf(stderr, "unknown option: -%c\n", argv[i - 1][1]);
print_help_and_exit();
break;
}
}
if (i > argc){
std::cout << "weird " << argc << std::endl;
print_help_and_exit();
}
return param;
}
#endif
|
Visa of 25-year-old has been cancelled after conviction on three charges of attempting to enter a foreign state to engage in hostile activities in 2013
Melbourne man Amin Mohamed found guilty of seeking to fight in Syria
A Melbourne man who claimed he was travelling to Turkey to meet his fiancee has been found guilty of attempting to join the conflict in Syria.
In 2013 Amin Mohamed, 25, obtained a New Zealand passport, quit his job, booked a flight to Istanbul and discussed travelling to Syria with a man named Handi Alqudsi.
The New Zealand citizen had a ticket from Brisbane to Istanbul booked for 21 September 2013 but was prevented from leaving the country when his passport was cancelled.
Following his arrest, Mohamed, who was living in Sunshine West, told authorities he was travelling to Turkey on the way to meet his fiancee in Denmark.
He was charged with attempting to enter a foreign state to engage in hostile activities.
A Victorian supreme court jury on Tuesday found him guilty on three counts.
Relatives cried in court after the verdict was read out.
Justice Lex Lasry extended Mohamed’s bail until a pre-sentence plea hearing on 1 December, saying he is being held in an immigration detention centre because his visa has been cancelled. |
#ifndef UNITTEST_COMMON_HPP
#define UNITTEST_COMMON_HPP
#include <gtest/gtest.h>
#include "sbovector.hpp"
#include <memory>
#include <mutex>
#include <vector>
constexpr size_t SMALL_SIZE = 5;
constexpr size_t LARGE_SIZE = 100;
constexpr size_t SBO_SIZE = 16;
static_assert(SMALL_SIZE < SBO_SIZE);
static_assert(SBO_SIZE < LARGE_SIZE);
// NOTE: Requires Synchronization: see DataTypeOperationTrackingSBOVector::SharedDataMutex
template <typename T>
struct CountingAllocator {
struct Totals {
int allocs_{0};
int frees_{0};
} * totals_;
using value_type = T;
using pointer = T*;
using const_pointer = const T*;
using aligned_t = std::aligned_storage_t<sizeof(T), alignof(T)>;
using is_always_equal = std::true_type;
CountingAllocator(Totals* totals) : totals_(totals) {}
pointer allocate(size_t n, const void*) { return allocate(n); }
pointer allocate(size_t n) {
++totals_->allocs_;
return reinterpret_cast<pointer>(new aligned_t[n]);
}
void deallocate(pointer p, size_t) {
++totals_->frees_;
delete[] reinterpret_cast<aligned_t*>(p);
}
bool operator==(const CountingAllocator&) const { return true; }
};
template <typename T>
struct CustomAllocator {
int dummy_;
using value_type = T;
using pointer = T*;
using const_pointer = const T*;
using is_always_equal = std::false_type;
pointer allocate(size_t n, const void*) { return allocate(n); }
pointer allocate(size_t n) { return new T[n]; }
void deallocate(pointer p, size_t) { delete[] p; }
bool operator==(const CustomAllocator& that) const { return this == &that; }
bool operator!=(const CustomAllocator& that) const { return this != &that; }
};
using Trivial = int;
class NonTrivial {
private:
std::unique_ptr<int> val_;
public:
NonTrivial() noexcept { val_ = std::make_unique<int>(42); }
~NonTrivial() {}
NonTrivial(const NonTrivial&) noexcept : NonTrivial() {}
NonTrivial(NonTrivial&&) noexcept : NonTrivial() {}
NonTrivial& operator=(const NonTrivial&) noexcept { return *this; }
NonTrivial& operator=(NonTrivial&&) noexcept { return *this; }
};
class MoveOnly {
public:
MoveOnly() noexcept {}
~MoveOnly() {}
MoveOnly(const MoveOnly&) = delete;
MoveOnly(MoveOnly&&) noexcept {}
MoveOnly& operator=(const MoveOnly&) = delete;
MoveOnly& operator=(MoveOnly&&) noexcept { return *this; }
};
// NOTE: Requires Synchronization, see: DataTypeOperationTrackingSBOVector::SharedDataMutex
struct OperationCounter {
struct OperationTotals {
int default_constructor_{0};
int copy_constructor_{0};
int move_constructor_{0};
int copy_assignment_{0};
int move_assignment_{0};
int moved_destructor_{0};
int unmoved_destructor_{0};
int use_after_move_{0};
int uninitialized_use_{0};
int uninitialized_destruct_{0};
void reset() {
default_constructor_ = 0;
copy_constructor_ = 0;
move_constructor_ = 0;
copy_assignment_ = 0;
move_assignment_ = 0;
moved_destructor_ = 0;
unmoved_destructor_ = 0;
use_after_move_ = 0;
uninitialized_use_ = 0;
uninitialized_destruct_ = 0;
}
int moves() const { return move_constructor_ + move_assignment_; }
int copies() const { return copy_constructor_ + copy_assignment_; }
int constructs() const {
return default_constructor_ + copy_constructor_ + move_constructor_;
}
int destructs() const { return moved_destructor_ + unmoved_destructor_; }
};
bool moved_;
bool constructed_;
static OperationTotals TOTALS;
OperationCounter() noexcept {
++TOTALS.default_constructor_;
moved_ = false;
constructed_ = true;
}
OperationCounter(OperationCounter&& from) noexcept {
from.Use();
++TOTALS.move_constructor_;
moved_ = false;
constructed_ = true;
from.moved_ = true;
}
OperationCounter(const OperationCounter& copy) noexcept {
copy.Use();
moved_ = false;
constructed_ = true;
++TOTALS.copy_constructor_;
}
OperationCounter& operator=(OperationCounter&& from) noexcept {
if (!constructed_) {
on_uninit_use();
}
++TOTALS.move_assignment_;
moved_ = from.moved_;
from.moved_ = true;
return *this;
}
OperationCounter& operator=(const OperationCounter& copy) noexcept {
if (!constructed_) {
on_uninit_use();
}
moved_ = copy.moved_;
++TOTALS.copy_assignment_;
return *this;
}
~OperationCounter() {
++(moved_ ? TOTALS.moved_destructor_ : TOTALS.unmoved_destructor_);
if (!constructed_) {
on_uninit_destruct();
}
constructed_ = false;
}
void Use() const {
if (!constructed_) {
on_uninit_use();
}
if (moved_) {
on_use_after_move();
}
}
// Increment counter wrappers for errors (unified location for ease of debugging hook)
inline void on_uninit_use() const { ++TOTALS.uninitialized_use_; }
inline void on_use_after_move() const { ++TOTALS.use_after_move_; }
inline void on_uninit_destruct() const { ++TOTALS.uninitialized_destruct_; }
};
template <typename Data, typename Alloc = std::allocator<Data>>
struct TypeHelper {
typedef Data DataType;
typedef Alloc AllocatorType;
};
template <typename T>
struct SBOVector_ : public ::testing::Test {
template<
size_t Size = SBO_SIZE,
typename Allocator = typename T::AllocatorType,
typename DataType = typename T::DataType,
typename... Args
>
auto CreateContainer(Args&&... args) {
return SBOVector<DataType, Size, Allocator>(std::forward<Args>(args)...);
}
};
template <typename T>
struct CopyableSBOVector_ : public SBOVector_<T> {
template<
size_t Size = SBO_SIZE,
typename Allocator = typename T::AllocatorType,
typename DataType = typename T::DataType,
typename... Args
>
auto CreateContainer(Args&&... args) {
return SBOVector<DataType, Size, Allocator>(std::forward<Args>(args)...);
}
};
struct DataTypeOperationTrackingSBOVector : public ::testing::Test {
  // Synchronization here is crude; unfortunately, the multi-threaded test environment
  // was an afterthought
static std::mutex SharedDataMutex;
using DataType = OperationCounter;
using AllocatorType = CountingAllocator<DataType>;
using ContainerType = SBOVector<DataType, SBO_SIZE, AllocatorType>;
AllocatorType::Totals totals_;
AllocatorType create_allocator() { return AllocatorType(&totals_); }
ContainerType regular_container_ { create_allocator() };
void SetUp() {
SharedDataMutex.lock();
totals_.allocs_ = totals_.frees_ = 0;
OperationCounter::TOTALS.reset();
}
void TearDown() {
EXPECT_EQ(totals_.allocs_, totals_.frees_);
auto& op_totals = OperationCounter::TOTALS;
EXPECT_EQ(op_totals.constructs(), op_totals.destructs());
EXPECT_EQ(op_totals.uninitialized_use_, 0);
EXPECT_EQ(op_totals.use_after_move_, 0);
EXPECT_EQ(op_totals.uninitialized_destruct_, 0);
SharedDataMutex.unlock();
}
template<typename ContainerType_p>
void UseElements(const ContainerType_p& container) {
for (const auto& elem : container) {
elem.Use();
}
}
};
typedef ::testing::Types<TypeHelper<Trivial>,
TypeHelper<Trivial, CustomAllocator<Trivial>>,
TypeHelper<NonTrivial>>
CopyableTestCases;
typedef ::testing::Types<TypeHelper<Trivial>,
TypeHelper<Trivial, CustomAllocator<Trivial>>,
TypeHelper<NonTrivial>,
TypeHelper<MoveOnly>>
AllTestCases;
TYPED_TEST_SUITE(SBOVector_, AllTestCases, );
TYPED_TEST_SUITE(CopyableSBOVector_, CopyableTestCases, );
template <typename Range1, typename Range2>
void EXPECT_RANGE_EQ(const Range1& A, const Range2& B) {
auto begin1 = A.begin();
auto begin2 = B.begin();
auto end1 = A.end();
auto end2 = B.end();
ASSERT_EQ(std::distance(begin1, end1), std::distance(begin2, end2))
<< "Ranges must be of equal size!\n";
auto iter1 = begin1;
auto iter2 = begin2;
for (; iter1 != end1; ++iter1, ++iter2) {
EXPECT_EQ(*iter1, *iter2) << "Element mismatch at position "
<< std::distance(begin1, iter1) << "\n";
}
}
template <size_t... Values>
std::vector<int> vector_from_sequence() {
return std::vector<int>{Values...};
}
template <size_t... Values>
std::vector<int> vector_from_sequence(
std::integer_sequence<size_t, Values...>) {
return vector_from_sequence<Values...>();
}
template <size_t V>
std::vector<int> make_vector_sequence() {
return vector_from_sequence(std::make_index_sequence<V>());
}
#endif // UNITTEST_COMMON_HPP |
Somewhat recently Google came out with a cool new search feature that lets you start a timer via a search. I really liked this workflow since it let me really quickly set up a timer from the browser by using the chrome “tab to search” functionality. I’d just open a new tab, type in “google.com”, hit tab, type “x minute timer”, hit enter, and boom, instant timer. I could get a timer set up in seconds. But there was an annoyance. I had to switch to the tab to see how much time was left! How could I live like this? So I came up with a solution; what if the favicon updated to show how much time was left? I mostly set timers while I’m cooking, so this would allow me to pull up a recipe on one tab, and be able to instantly glance at how much time is left in the other tab. By using the favicon I could also pin the tab to use less space.
I did a thorough search and couldn't find anything that did this, and favitimer.com was available, so I bought the domain and took a weekend to go make it. While building it, I ended up using a few interesting techniques, so I thought it'd be nice to do a write-up on them. If you'd like to see the code, everything is up on GitHub.
## Updating the Favicon
The crux of this mini app is setting a dynamic image as the tab favicon. Not all browsers support this, so straight off, IE and Safari are out. IE doesn't support updating the favicon at all before IE11, and Safari doesn't show favicons at all. Favicons can only be updated via the favicon link tag's href attribute, which leaves us with only one option for updating them dynamically: a data URI. Data URIs are a great browser feature that enables a whole host of new tricks, and I end up using them in a variety of different ways later on. A data URI is a way of encoding any kind of data, including binary data, as an inline string. For binary data, you can encode it in base 64. You also include a MIME type to let the browser know what kind of data you're encoding.
Now that we have a way of updating the favicon, we need a way to generate the image data. Currently the best way to generate bitmap images is through the canvas API, and even better, the canvas API already provides a toDataURL method for generating a data URI, so updating the favicon is very easy. All you have to do is render something on your canvas and set the favicon link's href attribute to the resulting data URI.
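To make that concrete before getting into the drawing details, here's a minimal sketch of just the favicon-update step; the link element id and canvas size are assumptions for illustration, not the actual markup favitimer uses:

```javascript
// Minimal sketch, assuming the page contains <link id="favicon" rel="icon" type="image/png">.
var link = document.getElementById('favicon'); // hypothetical id; the app keeps its own reference
var canvas = document.createElement('canvas');
canvas.width = canvas.height = 16; // a favicon-sized bitmap

var context = canvas.getContext('2d');
context.fillStyle = '#dd0000';
context.fillRect(0, 0, canvas.width, canvas.height); // draw anything here

// toDataURL returns a base64 PNG data URI that the link tag can point at directly.
link.href = canvas.toDataURL('image/png');
```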
To show how much time is left on the timer, I opted for a circle outline that slowly fills up radially until it forms a complete circle. For example, a timer that’s 25% done will show a quarter circle. The canvas API also provides an arc drawing method that takes a start and end radian that makes it easy to create a semi-circle.
Here’s the full drawing code:
function draw(canvas, percent, ms, color) {
  var context = canvas.getContext('2d'),
      diameter;

  diameter = Math.min(canvas.height, canvas.width);
  context.lineWidth = diameter / 12;

  if (percent >= 1) {
    context.strokeStyle = '#00cc00';
  } else {
    context.strokeStyle = '#dd0000';
  }
  context.strokeStyle = color || context.strokeStyle;
  context.font = 'bold 7.5px monospace';
  context.textAlign = 'center';
  context.fillStyle = '#333333';

  context.clearRect(0, 0, canvas.width, canvas.height);
  context.beginPath();
  context.arc(diameter / 2, diameter / 2, diameter * 0.375, -Math.PI / 2, -Math.PI / 2 + Math.PI * 2 * percent, false);
  context.stroke();

  if (ms > 0) {
    ms /= 1000;
    ms = Math.ceil(ms);
    if (ms > 60) {
      ms /= 60;
    }
    if (ms > 60) {
      ms /= 60;
    }
    ms = Math.floor(ms);
    context.fillText(ms, 8, 11, 10);
  }
}

function drawFavicon(canvas, percent, ms, color) {
  draw(canvas, percent, ms, color);
  link.href = canvas.toDataURL('image/png');
}
I reuse the image in the body of the page, so I made the method generic. Color is used to indicate the state of the timer, and the time left is represented in milliseconds, showing hours, minutes, or seconds left depending on how much time is left (the unit is left out because the number needs to fit into the small space of a favicon).
## Smoother Animation
Because the timer is displayed in the favicon, it's intended to reside in an inactive tab so you can just glance at it while doing something else. But while building the app, I discovered a limitation of requestAnimationFrame and setInterval: they get throttled to a few fps when the tab is inactive. The browser doesn't want inactive tabs sucking up resources when nobody is looking at them. While that's fine most of the time, it wasn't OK for this app, since you want to see the timer updating accurately when you're off the page!
After some research I found that you can workaround this issue by using a web worker. Web workers are designed to run in parallel, so they’re not throttled when a tab is inactive. Since web workers don’t have access to the original calling context, I wasn’t able to use requestAnimationFrame as that relies on the original window context, so that left my best option as setInterval . I created an interval worker that simply ran its own setInterval and passed a message back whenever it fired, and that fixed the animation smoothness problem.
The worker script is super simple:
var interval = null;

this.onmessage = function (event) {
  if (event.data.start) {
    interval = setInterval(function () {
      this.postMessage('');
    }, event.data.ms);
  }
  if (event.data.stop) {
    clearInterval(interval);
  }
};
To start or stop a worker you just send a message with either {start: true} or {stop: true}. I didn't want to break support for browsers that don't have web workers enabled, so I also wrapped the interval methods with a fallback to the regular setInterval method, along with its caveat:
function startInterval(callback, delay) {
  var interval;
  try {
    interval = new Worker(workerURL);
    interval.onmessage = callback;
    interval.postMessage({ start: true, ms: delay });
  } catch (error) {
    interval = setInterval(callback, delay);
  } finally {
    return interval;
  }
}

function stopInterval(interval) {
  if (typeof interval === 'number') {
    clearInterval(interval);
  } else if (interval) {
    interval.postMessage({ stop: true });
    interval.terminate();
  }
}
As a side note, never assume that setTimeout or setInterval will actually execute in the time you tell it to; there are tons of factors that prevent them from being accurate. If you need to base something on time elapsed, create a start timestamp and calculate the difference.
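As a rough illustration of that timestamp-based approach (the names and values here are mine, not lifted from the app's source), the remaining time can be recomputed from Date.now() on every tick instead of trusting the interval delay:

```javascript
// Hypothetical sketch: the interval only drives redraws; accuracy comes from the timestamps.
var durationMs = 5 * 60 * 1000;   // assumed 5-minute timer
var startedAt = Date.now();       // recorded when the timer starts

var ticker = startInterval(function () {
  var elapsed = Date.now() - startedAt;
  var remaining = Math.max(durationMs - elapsed, 0);
  drawFavicon(canvas, 1 - remaining / durationMs, remaining); // assumes the app's canvas is in scope
  if (remaining === 0) {
    stopInterval(ticker);
  }
}, 200); // a 200 ms redraw interval is an arbitrary choice for this example
```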
## Adding Sound
I played around with a couple of ways of adding sound for the beep noise that plays when the timer finishes. Originally I used an inline base-64-encoded audio file and played it using the Audio API, but I wasn't a fan of having a big binary file in the middle of the code. That first version used this bit of code:
function beep() {
  var sound = new Audio("data:audio/wav;base64,...");
  sound.play();
}
Looking further I found the AudioContext API for creating sounds programmatically. The new beep function required more code, but was much more flexible, and cut down on the file size a lot:
function beep(duration, frequency, volume) {
  var oscillator, gainNode;

  duration = duration || 500;

  oscillator = audioContext.createOscillator();
  gainNode = audioContext.createGain();

  oscillator.connect(gainNode);
  gainNode.connect(audioContext.destination);

  gainNode.gain.value = 0;
  oscillator.frequency.value = frequency;
  oscillator.start();

  gainNode.gain.setTargetAtTime(volume, audioContext.currentTime, 0.005);
  gainNode.gain.setTargetAtTime(0, audioContext.currentTime + (duration / 1000), 0.005);
}
The AudioContext API works by creating and hooking together various AudioNodes to create different tones. An OscillatorNode creates a wave function that sounds like a solid tone. A GainNode adjusts the volume of the oscillator node. The oscillator node is connected to the gain node, which is hooked up to the audio context output. The parameters for the sound are then set, with gain setting the volume and frequency setting the pitch of the tone. When I first tried starting and stopping the oscillator, it had the unfortunate side effect of creating a popping noise when it turned on and off, due to the sudden change in volume. To solve that, I used setTargetAtTime, which gradually changes the volume over time. It takes a target volume, an absolute time to change the volume at, and a value that sets how quickly the sound should transition on an exponential basis. With that quick transition I was able to get rid of the popping noise, and the resulting beep was (while still purposefully annoying) less jarring.
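For a sense of how the parameters fit together, a call might look like the following; the duration, frequency, and volume values are illustrative guesses rather than the ones favitimer actually uses:

```javascript
// Assumes audioContext was created earlier, e.g.
// var audioContext = new (window.AudioContext || window.webkitAudioContext)();
beep(500, 880, 0.1); // a half-second, fairly quiet beep at 880 Hz
```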
## Making it Fast
Because the app is a simple single page Javascript utility, I wanted to make the page load as lightweight as possible. To do this I wanted to get the page down to a single request*, and that means inlining everything. For the most part it was pretty simple.
To handle the inlining and minification of the CSS and Javascript I just used the gulp-inline npm package.
I decided against using any frameworks or utility libraries so I wouldn't have to manage a big inline library, and also to cut back on the file size of the page. That means using nothing but the pure native DOM API. The compatibility benefits of libraries like underscore or jQuery weren't an issue, as only modern browsers support canvas and updating the favicon dynamically anyway. It's also nice to do a project pure every now and then to reconnect with the browser :).
Not all the inlining was trivial though. The most interesting part was inlining the web worker script. The web worker API takes a URL string pointing to the script you want to use for the worker, so we could use the same data URI trick, but I didn't want to have to re-encode the script every time I made an edit. After doing some research, I found that you can provide a local script by passing the string version of a function (via toString) to the web worker API using the Blob data type. A Blob is like a lightweight temporary file, and you can put any kind of data in it, including Javascript. After instantiating a Blob you can get a blob URI for it in the format of blob:(domain)/(hash) by passing it to the URL API using createObjectURL. After the temporary URL is created it acts like a normal URL. You can visit it, and include it, allowing you to use it as a worker script:
workerURL = (function () {
  var blob, intervalWorker, workerString, Builder;

  intervalWorker = function () {
    var interval = null;
    this.onmessage = function (event) {
      if (event.data.start) {
        interval = setInterval(function () {
          this.postMessage('');
        }, event.data.ms);
      }
      if (event.data.stop) {
        clearInterval(interval);
      }
    };
  };

  workerString = "(" + intervalWorker.toString() + ")();";

  if (Blob) {
    blob = new Blob([workerString], { type: 'application/javascript' });
  } else if (BlobBuilder || WebKitBlobBuilder || MozBlobBuilder) {
    Builder = BlobBuilder || WebKitBlobBuilder || MozBlobBuilder;
    blob = new Builder();
    blob.append(workerString);
    blob = blob.getBlob();
  }

  if (blob) {
    return URL.createObjectURL(blob);
  } else {
    return 'interval.js';
  }
})();
workerString needs to be wrapped in parentheses to execute the function; otherwise it will just be defined in the worker and do nothing.
In this project, I use data URIs in both image and code form. I think that really shows off the power and versatility of this cool feature. Since you can use them pretty much anywhere, you're really just limited by your imagination!
The result of all this? The final payload is a single request that gzips down to 4.2kb. The page will load pretty much as quickly as the host will respond. On my internet the page loads consistently in under 50 ms minus the DNS lookup, and sometimes under 20ms.
* Ok, I have a Google Analytics script to spy on you guys, but that’s not a blocking resource for running the app, so it doesn’t slow anything down.
## Parsing Input
Another feature I wanted was semantic parsing of a query string so I could start a timer quickly from the URL bar. This was one of the main features I liked from setting a timer on Google, so I wanted to make sure I replicated this for my own convenience.
To start out, I wanted to allow setting the timer via a query parameter. I bounced between a bunch of possible parsing solutions, with the main consideration being how flexible I wanted to make it. I wanted it to accept multiple ways of writing time units, such as "seconds", "second", "secs", "sec", "s", etc. Since I wanted pretty much every permutation, I settled on a regular expression that checks the first two characters of the unit and then optionally consumes the rest of the letters for that unit, ignoring omitted letters.
I also wanted to be able to put in fractions of units, such as "0.5" or "1/2". To do that, I used a very carefully placed eval. Since the only input it can take is the result of a regex match, it's only as safe as the regex is, which only looks for integers, . for decimals, and / for fractions.
function executeSearch(search) {
  var searchRegex = /((?:\d*\.|\d+\s*\/\s*)?\d+)[\s+]*(se?c?o?n?d?s?|mi?n?u?t?e?s?|ho?u?r?s?|$)(?![a-z])/gi,
      hours = 0,
      minutes = 0,
      seconds = 0,
      match,
      unit;

  while (match = searchRegex.exec(search)) {
    unit = match[2] || '';
    switch (unit[0]) {
      case 'm':
        minutes = eval(match[1]);
        break;
      case 'h':
        hours = eval(match[1]);
        break;
      default:
        seconds = eval(match[1]);
        break;
    }
  }
  ...
}
Each time the regex executes, it keeps track of the position it has consumed up to so far, so calling exec multiple times ends up finding all matching fragments in the location query string. Since it ignores any part of the string that doesn't match, it also works for semantically written sentences such as "set a 10 minute and 30 second timer".
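To give a feel for how those repeated exec calls play out, here's a hypothetical run through the parser (the query string is just an example):

```javascript
executeSearch('set a 10 minute and 30 second timer');
// 1st exec: group 1 = "10", group 2 = "minute"  -> minutes = 10
// 2nd exec: group 1 = "30", group 2 = "second"  -> seconds = 30
// "set a", "and", and "timer" never match, so they're simply ignored.
```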
I also wanted to enable tab to search to let you fill out the time in the address bar, which is enabled in Chrome using an OpenSearch description document. To tell the browser where to find it, you include a special link tag in the head portion of the document:
<link type="application/opensearchdescription+xml" rel="search" href="/search.xml" />
The search.xml file looks like this:
<?xml version="1.0"?>
<OpenSearchDescription xmlns="http://a9.com/-/spec/opensearch/1.1/">
  <ShortName>Favitimer</ShortName>
  <Description>Enter a period of time to set a timer.</Description>
  <Url type="text/html" method="get" template="http://www.favitimer.com/?q={searchTerms}" />
</OpenSearchDescription>
This describes the pattern and functionality of the search to the browser.
## That Was Fun
So even though this was a small and simple project, there’s still a good deal of novel tech going on in it. It was fun to work with some of the new web technologies and really focus on small details in a very scoped side project. I’m pretty proud of how the code came out, so give it a look. I hope you learned something in this write up, and happy coding! |
#pragma once
#include <Arduino.h>
class AuthProvider {
public:
// Returns true if authentication is currently enabled
virtual bool isAuthenticationEnabled() const { return false; }
virtual const String& getUsername() const;
virtual const String& getPassword() const;
};
class SimpleAuthProvider : public AuthProvider {
public:
SimpleAuthProvider();
// Sets the provided username and password and enables authentication.
void requireAuthentication(const String& username, const String& password);
// Clears username and password and disables authentication
void disableAuthentication();
virtual bool isAuthenticationEnabled() const override;
virtual const String& getUsername() const override;
virtual const String& getPassword() const override;
private:
String username;
String password;
bool authEnabled;
};
template <class T>
class PassthroughAuthProvider : public AuthProvider {
public:
PassthroughAuthProvider(const T& proxy)
: proxy(proxy)
{ }
virtual bool isAuthenticationEnabled() const override {
return proxy.isAuthenticationEnabled();
}
virtual const String& getUsername() const override {
return proxy.getUsername();
}
virtual const String& getPassword() const override {
return proxy.getPassword();
}
private:
const T& proxy;
}; |
import { IEmitter } from 'neurons-emitter';
import { Property, Binding, Emitter, Element } from '../../binding/factory/decorator';
import { SvgIcon } from '../icon/svgicon';
import { checkbox_uncheck, checkbox_check } from '../icon/icons';
import { theme } from '../style/theme';
import { ISVGIcon } from 'neurons-dom/dom/element';
import { ColorWheel } from './colorwheel';
import { color as colorUtils, isDefined } from 'neurons-utils';
import { HSlider } from '../slider/hslider';
import { IElementRef } from '../../binding/common/interfaces';
import { NumberInput } from '../input/number';
@Binding({
selector: 'ne-pallete-picker',
template: `
<div class="ne-pallete-picker">
<ne-color-wheel
[colors]="wheelColors"
[(saturation)]="saturation"
[(lightness)]="lightness"
(change)="onColorWheelChange($event)"
></ne-color-wheel>
<div class="ne-pallete-picker-colors">
<div class="ne-pallete-picker-colors-container">
<div *for="color in colors" [style.width]="getColorWidth()">
<div [style.background-color]="color"></div>
</div>
</div>
</div>
<div class="ne-pallete-picker-saturation ne-pallete-picker-slider">
<div class="ne-pallete-picker-slider-label">饱和度</div>
<ne-h-slider [min]="0" [max]="100" [(value)]="saturation" (change)="onSliderChange()"></ne-h-slider>
<div class="ne-pallete-picker-slider-bg">
<div [style.background-image]="saturationColorGradient"></div>
</div>
<ne-number-input [(value)]="saturation" [min]="0" [max]="100" (change)="onSliderChange()"></ne-number-input>
</div>
<div class="ne-pallete-picker-lightness ne-pallete-picker-slider">
<div class="ne-pallete-picker-slider-label">明度</div>
<ne-h-slider [min]="0" [max]="100" [(value)]="lightness" (change)="onSliderChange()"></ne-h-slider>
<div class="ne-pallete-picker-slider-bg">
<div [style.background-image]="lightnessColorGradient"></div>
</div>
<ne-number-input [(value)]="lightness" [min]="0" [max]="100" (change)="onSliderChange()"></ne-number-input>
</div>
<div class="ne-pallete-picker-alpha ne-pallete-picker-slider">
<div class="ne-pallete-picker-slider-label">透明度</div>
<ne-h-slider [min]="0" [max]="1" [(value)]="alpha" [step]="0.01" (change)="onSliderChange()"></ne-h-slider>
<div class="ne-pallete-picker-slider-bg">
<div [style.background-image]="alphaColorGradient"></div>
</div>
<ne-number-input [(value)]="alpha" [min]="0" [max]="1" [step]="0.01" (change)="onSliderChange()"></ne-number-input>
</div>
</div>
`,
style: `
.ne-pallete-picker {
position: relative;
user-select: none;
background-color: #333333;
padding: 24px 12px;
box-sizing: border-box;
border-radius: 3px;
.ne-color-wheel {
width: 320px;
height: 320px;
}
.ne-pallete-picker-colors {
position: relative;
border-radius: 3px;
overflow: hidden;
text-align: center;
padding: 12px;
box-sizing: border-box;
.ne-pallete-picker-colors-container {
position: relative;
background-image: url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAwAAAAMCAIAAADZF8uwAAAAGUlEQVQYV2M4gwH+YwCGIasIUwhT25BVBADtzYNYrHvv4gAAAABJRU5ErkJggg==);
width: 100%;
height: 28px;
border-radius: 3px;
overflow: hidden;
box-shadow: 0 0 0 1px #111111, 0 0 0 2px #666666;
& > * {
display: inline-block;
vertical-align: top;
box-sizing: border-box;
height: 100%;
& > div {
position: relative;
width: 100%;
height: 100%;
}
}
}
}
.ne-pallete-picker-slider {
position: relative;
padding: 0 50px 0 56px;
box-sizing: border-box;
.ne-pallete-picker-slider-label {
position: absolute;
top: 0;
left: 12px;
line-height: 30px;
}
.ne-h-slider {
z-index: 1;
.ne-h-slider-bg {
background-color: transparent;
}
.ne-h-slider-progress {
background-color: transparent;
}
.ne-h-slider-button {
top: -12px;
left: -4px;
width: 8px;
height: 24px;
border: solid 1px #333333;
border-radius: 3px;
box-sizing: border-box;
background-color: transparent;
box-shadow: inset 0 0 0px 2px #ffffff, inset 0 0 0 4px rgba(0, 0, 0, 0.24);
}
}
.ne-number-input {
position: absolute;
right: 12px;
top: 3px;
text-align: center;
width: 42px;
background-color: ${theme.gray.light};
box-shadow: inset 0 0 0 1px #222;
}
.ne-pallete-picker-slider-bg {
background-image: url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAwAAAAMCAIAAADZF8uwAAAAGUlEQVQYV2M4gwH+YwCGIasIUwhT25BVBADtzYNYrHvv4gAAAABJRU5ErkJggg==);
position: absolute;
left: 68px;
right: 62px;
top: 6px;
bottom: 6px;
z-index: 0;
border-radius: 3px;
box-shadow: 0 0 0 1px #111111, 0 0 0 2px #666666;
overflow: hidden;
& > div {
position: relative;
width: 100%;
height: 100%;
}
}
}
}
`,
requirements: [
SvgIcon,
ColorWheel,
HSlider,
NumberInput
]
})
export class PalletePicker {
@Property() pallete: string[] = [];
@Emitter() palleteChange: IEmitter<string[]>;
@Emitter() change: IEmitter<void>;
wheelColors: string[] = [];
colors: string[] = [];
hues: number[] = [];
saturation: number = 100;
lightness: number = 50;
alpha: number = 1;
saturationColorGradient = 'linear-gradient(45deg, rgba(0, 0, 0, 0), rgba(0, 0, 0, 0))';
lightnessColorGradient = 'linear-gradient(45deg, rgba(0, 0, 0, 0), rgba(0, 0, 0, 0))';
alphaColorGradient = 'linear-gradient(45deg, rgba(0, 0, 0, 0), rgba(0, 0, 0, 0))';
onChanges(changes) {
if (!changes || 'pallete' in changes) {
this.wheelColors = this.pallete || [];
this.colors = this.wheelColors.concat();
this.updateProperties(this.wheelColors);
this.updateSliders(this.hues, this.saturation, this.lightness, this.alpha);
}
}
onColorWheelChange(datas: {h: number, s: number, l: number}[]) {
this.hues = (datas || []).map(d => d.h);
this.updateColors(this.hues, this.saturation, this.lightness, this.alpha);
this.updateSliders(this.hues, this.saturation, this.lightness, this.alpha);
this.commitPallete();
}
onSliderChange() {
this.updateColors(this.hues, this.saturation, this.lightness, this.alpha);
this.commitPallete();
}
getColorWidth() {
return !!this.colors && !!this.colors.length ? Math.floor(10000 / this.colors.length) / 100 + '%' : '100%';
}
updateProperties(pallete: string[]) {
if (!pallete || !pallete.length) {
this.hues = [];
this.saturation = 100;
this.lightness = 50;
this.alpha = 1;
} else {
let rgba, hsl;
this.hues = pallete.map(c => {
rgba = colorUtils.toRGBAArray(c);
hsl = colorUtils.rgbToHsl(rgba[0], rgba[1], rgba[2]);
return hsl[0];
})
const typicalColor = pallete[0];
rgba = colorUtils.toRGBAArray(typicalColor);
hsl = colorUtils.rgbToHsl(rgba[0], rgba[1], rgba[2]);
this.lightness = Math.round(hsl[2]);
this.saturation = Math.round(hsl[1]);
this.alpha = Math.round(100 * rgba[3]) / 100;
}
}
updateColors(hues: number[], s, l, a) {
if (!hues || !hues.length) {
this.colors = [];
} else {
// const typicalColor = pallete[0];
// let rgba = colorUtils.toRGBAArray(typicalColor);
// let hsl = colorUtils.rgbToHsl(rgba[0], rgba[1], rgba[2]);
// this.colors = pallete.map(color => {
this.colors = hues.map(h => {
// const rgba = colorUtils.toRGBAArray(color);
// const hsl = colorUtils.rgbToHsl(rgba[0], rgba[1], rgba[2]);
// const rgb = colorUtils.hslToRgb(hsl[0], s, l);
const rgb = colorUtils.hslToRgb(h, s, l);
return colorUtils.rgbToCSSRGB(rgb[0], rgb[1], rgb[2], a * 255);
});
}
}
updateSliders(hues: number[], s, l, a) {
const hue = hues && !!hues[0] ? hues[0] : 0;
this.saturationColorGradient = `linear-gradient(45deg, hsl(0, 100%, 100%), hsl(${hue}, 100%, 50%))`;
this.lightnessColorGradient = `linear-gradient(45deg, hsl(0, 0%, 0%), hsl(${hue}, 100%, 50%), hsl(${hue}, 100%, 100%))`;
this.alphaColorGradient = `linear-gradient(45deg, hsla(0, 100%, 100%, 0), hsl(${hue}, 100%, 50%))`;
}
protected commitPallete() {
this.pallete = this.colors.concat();
this.palleteChange.emit(this.pallete);
this.change.emit();
}
}
|
import multiprocessing as mp
import sys


def do_conversion(list_of_commands, max_concurrent_jobs):
    # Run the conversion commands across a pool of worker processes.
    # do_execute_conversion_call is assumed to be defined elsewhere in this module.
    pool = mp.Pool(processes=max_concurrent_jobs)
try:
l_pids = pool.imap(do_execute_conversion_call, list_of_commands, chunksize=1)
except KeyboardInterrupt:
sys.stdout.write('\033[0m')
sys.stdout.write('user interrupt\n')
pool.close()
pool.join()
return l_pids |
The Tapajós River – in the heart of the Amazon – is home to thousands of people and incomparable biodiversity. But all that could change if a proposed mega-dam project moves forward.
At the moment you’re reading this, the Tapajós River is flowing unimpeded through rainforest deep in the Brazilian Amazon. Its waters teem with life – including pink river dolphins! Its banks are home to hundreds of types of birds, lizards, and amphibians, as well as mammals like the jaguar, giant anteater and ocelot.
Thousands of Munduruku Indigenous Peoples depend on this river and its thriving ecosystem for their livelihoods, as they have for centuries. But all this could change if the Brazilian government moves forward with a plan to construct a series of forty dams along the Tapajós River basin.
The largest of the proposed sites – the 7.6km-wide São Luiz do Tapajós dam – would flood so much of the Tapajós, it would create a reservoir the size of New York City (729 km²). In the process, it would wash away lakes, rock formations, islands and other crucial habitat – destroying some of the traditional lands of Munduruku people as well.
Standing with the Munduruku
Damming the river would destroy the environmental balance of the region, and the communities that depend on it. That’s why Greenpeace Brazil activists are joining forces with Munduruku leaders in protest to demand an end to the São Luiz do Tapajós dam project.
This weekend, activists joined the Munduruku on the Tapajós River to send a clear message to the world: "Damn the dam. Keep the Tapajós River alive.”
“We oppose the dam construction because our lives depend on the river and the forest and they belong to us. If the Tapajós River is dammed, we will lose our fish ground, our means of transport; we will lose our way of life,” explains a Chief from the Sawre Muybu Indigenous Land, Juarez Saw Munduruku.
A long road ahead
The Munduruku people have been fighting dam projects on their land for decades, and the Brazilian government has failed to consult them in the planning process for the São Luiz do Tapajós dam.
Instead, the political interests behind this dam and other mega-dam projects focus on corporate profit – feeding a vicious circle of corruption and misuse of public money. Other dams in the Amazon, such as the Belo Monte mega-dam on the Xingu River are even part of an ongoing corruption investigation in Brazil.
There is another way. Clean and responsible energy sources, such as solar and wind, can meet the supply needs for all Brazilians without destroying biodiversity and the livelihoods of local communities.
Join us in the first step towards that future. Stand in solidarity with the Munduruku people and act to stop the damming of the Tapajós River: share their story.
Together, we can keep the Tapajós alive.
Tica Minami is the head of the Amazon campaign at Greenpeace Brazil. |
// tests/main.cpp: create an executable file to run unit tests of everything in
// PETT. This uses Catch2 (NOT the old pre-C++11 Catch!) to provide a main()
// function but all the actual tests are in other files.
#define CATCH_CONFIG_MAIN
#include "Catch2/catch.hpp"
|
/**
 * Adds an attribute to the set (first creating an empty set if not already present)
* @param ac
*/
public void addAttributeClass(OWLClass ac) {
if (attributeClassSet == null)
attributeClassSet = new HashSet<OWLClass>();
this.attributeClassSet.add(ac);
} |
/**
* A client of the system providing Scroll Capture capability on behalf of a Window.
* <p>
* An instance is created to wrap the selected {@link ScrollCaptureCallback}.
*
* @hide
*/
public class ScrollCaptureClient extends IScrollCaptureClient.Stub {
private static final String TAG = "ScrollCaptureClient";
private static final int DEFAULT_TIMEOUT = 1000;
private final Handler mHandler;
private ScrollCaptureTarget mSelectedTarget;
private int mTimeoutMillis = DEFAULT_TIMEOUT;
protected Surface mSurface;
private IScrollCaptureController mController;
private final Rect mScrollBounds;
private final Point mPositionInWindow;
private final CloseGuard mCloseGuard;
// The current session instance in use by the callback.
private ScrollCaptureSession mSession;
// Helps manage timeout callbacks registered to handler and aids testing.
private DelayedAction mTimeoutAction;
/**
* Constructs a ScrollCaptureClient.
*
* @param selectedTarget the target the client is controlling
* @param controller the callbacks to reply to system requests
*
* @hide
*/
public ScrollCaptureClient(
@NonNull ScrollCaptureTarget selectedTarget,
@NonNull IScrollCaptureController controller) {
        requireNonNull(selectedTarget, "<selectedTarget> must be non-null");
        requireNonNull(controller, "<controller> must be non-null");
final Rect scrollBounds = requireNonNull(selectedTarget.getScrollBounds(),
"target.getScrollBounds() must be non-null to construct a client");
mSelectedTarget = selectedTarget;
mHandler = selectedTarget.getContainingView().getHandler();
mScrollBounds = new Rect(scrollBounds);
mPositionInWindow = new Point(selectedTarget.getPositionInWindow());
mController = controller;
mCloseGuard = new CloseGuard();
mCloseGuard.open("close");
selectedTarget.getContainingView().addOnAttachStateChangeListener(
new View.OnAttachStateChangeListener() {
@Override
public void onViewAttachedToWindow(View v) {
}
@Override
public void onViewDetachedFromWindow(View v) {
selectedTarget.getContainingView().removeOnAttachStateChangeListener(this);
endCapture();
}
});
}
@VisibleForTesting
public void setTimeoutMillis(int timeoutMillis) {
mTimeoutMillis = timeoutMillis;
}
@Nullable
@VisibleForTesting
public DelayedAction getTimeoutAction() {
return mTimeoutAction;
}
private void checkConnected() {
if (mSelectedTarget == null || mController == null) {
throw new IllegalStateException("This client has been disconnected.");
}
}
private void checkStarted() {
if (mSession == null) {
throw new IllegalStateException("Capture session has not been started!");
}
}
@WorkerThread // IScrollCaptureClient
@Override
public void startCapture(Surface surface) throws RemoteException {
checkConnected();
mSurface = surface;
scheduleTimeout(mTimeoutMillis, this::onStartCaptureTimeout);
mSession = new ScrollCaptureSession(mSurface, mScrollBounds, mPositionInWindow, this);
mHandler.post(() -> mSelectedTarget.getCallback().onScrollCaptureStart(mSession,
this::onStartCaptureCompleted));
}
@UiThread
private void onStartCaptureCompleted() {
if (cancelTimeout()) {
mHandler.post(() -> {
try {
mController.onCaptureStarted();
} catch (RemoteException e) {
doShutdown();
}
});
}
}
@UiThread
private void onStartCaptureTimeout() {
endCapture();
}
@WorkerThread // IScrollCaptureClient
@Override
public void requestImage(Rect requestRect) {
checkConnected();
checkStarted();
scheduleTimeout(mTimeoutMillis, this::onRequestImageTimeout);
// Response is dispatched via ScrollCaptureSession, to onRequestImageCompleted
mHandler.post(() -> mSelectedTarget.getCallback().onScrollCaptureImageRequest(
mSession, new Rect(requestRect)));
}
@UiThread
void onRequestImageCompleted(long frameNumber, Rect capturedArea) {
final Rect finalCapturedArea = new Rect(capturedArea);
if (cancelTimeout()) {
mHandler.post(() -> {
try {
mController.onCaptureBufferSent(frameNumber, finalCapturedArea);
} catch (RemoteException e) {
doShutdown();
}
});
}
}
@UiThread
private void onRequestImageTimeout() {
endCapture();
}
@WorkerThread // IScrollCaptureClient
@Override
public void endCapture() {
if (isStarted()) {
scheduleTimeout(mTimeoutMillis, this::onEndCaptureTimeout);
mHandler.post(() ->
mSelectedTarget.getCallback().onScrollCaptureEnd(this::onEndCaptureCompleted));
} else {
disconnect();
}
}
private boolean isStarted() {
return mController != null && mSelectedTarget != null;
}
@UiThread
    private void onEndCaptureCompleted() {
if (cancelTimeout()) {
doShutdown();
}
}
@UiThread
private void onEndCaptureTimeout() {
doShutdown();
}
private void doShutdown() {
try {
if (mController != null) {
mController.onConnectionClosed();
}
} catch (RemoteException e) {
// Ignore
} finally {
disconnect();
}
}
/**
* Shuts down this client and releases references to dependent objects. No attempt is made
* to notify the controller, use with caution!
*/
public void disconnect() {
if (mSession != null) {
mSession.disconnect();
mSession = null;
}
mSelectedTarget = null;
mController = null;
}
/** @return a string representation of the state of this client */
public String toString() {
return "ScrollCaptureClient{"
+ ", session=" + mSession
+ ", selectedTarget=" + mSelectedTarget
+ ", clientCallbacks=" + mController
+ "}";
}
private boolean cancelTimeout() {
if (mTimeoutAction != null) {
return mTimeoutAction.cancel();
}
return false;
}
private void scheduleTimeout(long timeoutMillis, Runnable action) {
if (mTimeoutAction != null) {
mTimeoutAction.cancel();
}
mTimeoutAction = new DelayedAction(mHandler, timeoutMillis, action);
}
/** @hide */
@VisibleForTesting
public static class DelayedAction {
private final AtomicBoolean mCompleted = new AtomicBoolean();
private final Object mToken = new Object();
private final Handler mHandler;
private final Runnable mAction;
@VisibleForTesting
public DelayedAction(Handler handler, long timeoutMillis, Runnable action) {
mHandler = handler;
mAction = action;
mHandler.postDelayed(this::onTimeout, mToken, timeoutMillis);
}
private boolean onTimeout() {
if (mCompleted.compareAndSet(false, true)) {
mAction.run();
return true;
}
return false;
}
/**
* Cause the timeout action to run immediately and mark as timed out.
*
* @return true if the timeout was run, false if the timeout had already been canceled
*/
@VisibleForTesting
public boolean timeoutNow() {
return onTimeout();
}
/**
* Attempt to cancel the timeout action (such as after a callback is made)
*
* @return true if the timeout was canceled and will not run, false if time has expired and
* the timeout action has or will run momentarily
*/
public boolean cancel() {
if (!mCompleted.compareAndSet(false, true)) {
// Whoops, too late!
return false;
}
mHandler.removeCallbacksAndMessages(mToken);
return true;
}
}
} |
// newConn makes a connection to logstash (the monitor data receiver)
func (fm *FlowMonitor) newConn() error {
conn, err := net.DialTimeout("tcp", fm.conf.Addr, time.Duration(time.Second*5))
if err == nil && conn != nil {
fm.conn = conn
fm.flowMonitorThrottle = false
log.Info("init flow monitor conn to: %s", fm.conf.Addr)
return nil
} else {
log.Error("flow monitor conn failed: %s: %v", fm.conf.Addr, err)
fm.flowMonitorThrottle = true
return err
}
} |
def do_inc(self, val):
self.set_half_carry_flag((val & 0x0F) == 0x0F)
val += 1
self.set_zero_flag(val == 0)
self.set_subtract_flag(False)
return val |
/**
* Read the file content
* from the {@link LevelReader#CONTENT_FILES} file
 * relying on the declared tiles
*/
static @NotNull LevelReaderResult readContent(
@NotNull Path levelDirectory,
@NotNull Map<Character, Integer> tiles
) throws IOException {
Path elementsFile = getFile(levelDirectory, CONTENT_FILES);
List<String> contentContent = Files.readAllLines(elementsFile);
int height = contentContent.size();
int width = contentContent.get(0).length();
int[] content = new int[height * width];
for (int lineIndex = 0; lineIndex < height; lineIndex++) {
String contentLine = contentContent.get(lineIndex);
if (contentLine.length() != width) {
throw new IllegalArgumentException(
"[" +
contentLine +
"] is not " +
width +
" characters long at line " +
lineIndex +
" of " +
elementsFile.toAbsolutePath());
}
for (int columnIndex = 0; columnIndex < width; columnIndex++) {
char c = contentLine.charAt(columnIndex);
if (!tiles.containsKey(c)) {
throw new IllegalArgumentException(
"Unknown tile [" +
c +
"] at line " +
lineIndex +
" of " +
elementsFile.toAbsolutePath());
}
int position = (lineIndex * width) + columnIndex;
content[position] = tiles.get(c);
}
}
return new LevelReaderResult(width, height, content);
} |
A NOVEL DECODING ALGORITHM FOR REVERSIBLE VARIABLE LENGTH CODES BASED ON THE MASSEY METRIC
The use of Variable Length Codes (VLC) to increase coding efficiency based on the statistical characteristics of the data has long been employed in digital communication systems. However, the catastrophic degradation of VLC-encoded data that results from bit errors has led to the development of reversible variable length codes (RVLC), which can be decoded bidirectionally and thereby offer more effective error-resilience capabilities. Up to this point, no soft decoding algorithm for RVLC has been devised that effectively exploits the bidirectional characteristics of such codes. This paper introduces a novel decoding algorithm for reversible variable length codes that makes use of the Massey metric while limiting the complexity to that of a purely unidirectional sequential decoder.
/* tslint:disable */
/* eslint-disable */
/**
* Rosetta
* Build Once. Integrate Your Blockchain Everywhere.
*
* The version of the OpenAPI document: 1.4.10
*
*
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
* https://openapi-generator.tech
* Do not edit the class manually.
*/
import { exists } from '../runtime';
import { Amount, AmountFromJSON, AmountToJSON, BlockIdentifier, BlockIdentifierFromJSON, BlockIdentifierToJSON } from './';
/**
 * An AccountBalanceResponse is returned on the /account/balance endpoint. If an account has a balance for each AccountIdentifier describing it (ex: an ERC-20 token balance on a few smart contracts), an account balance request must be made with each AccountIdentifier. The `coins` field was removed and replaced by `/account/coins` in `v1.4.7`.
* @export
* @interface AccountBalanceResponse
*/
export interface AccountBalanceResponse {
/**
*
* @type {BlockIdentifier}
* @memberof AccountBalanceResponse
*/
block_identifier: BlockIdentifier;
/**
* A single account may have a balance in multiple currencies.
* @type {Array<Amount>}
* @memberof AccountBalanceResponse
*/
balances: Array<Amount>;
/**
* Account-based blockchains that utilize a nonce or sequence number should include that number in the metadata. This number could be unique to the identifier or global across the account address.
* @type {object}
* @memberof AccountBalanceResponse
*/
metadata?: object;
}
export function AccountBalanceResponseFromJSON(json: any): AccountBalanceResponse {
return AccountBalanceResponseFromJSONTyped(json, false);
}
export function AccountBalanceResponseFromJSONTyped(json: any, ignoreDiscriminator: boolean): AccountBalanceResponse {
if (json === undefined || json === null) {
return json;
}
return {
block_identifier: BlockIdentifierFromJSON(json['block_identifier']),
balances: (json['balances'] as Array<any>).map(AmountFromJSON),
metadata: !exists(json, 'metadata') ? undefined : json['metadata'],
};
}
export function AccountBalanceResponseToJSON(value?: AccountBalanceResponse | null): any {
if (value === undefined) {
return undefined;
}
if (value === null) {
return null;
}
return {
block_identifier: BlockIdentifierToJSON(value.block_identifier),
balances: (value.balances as Array<any>).map(AmountToJSON),
metadata: value.metadata,
};
}
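For orientation, the JSON this model describes has roughly the following shape. The top-level field names come from the interface above; the nested BlockIdentifier and Amount fields and all concrete values are illustrative assumptions, not definitions from this file.
# Illustrative only: an example payload of the shape AccountBalanceResponseFromJSON accepts.
# The nested field names (index/hash, value/currency) are assumed, not defined in this file.
example_account_balance_response = {
    "block_identifier": {"index": 1123941, "hash": "0xabc123"},
    "balances": [
        {"value": "1238089899992", "currency": {"symbol": "ROS", "decimals": 8}},
    ],
    # optional and chain-specific, e.g. an account nonce or sequence number
    "metadata": {"sequence_number": 23},
}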
|
ZORKMID
TYPES OF KNOWN CURRENCY (type / year / portrait / description)
1zm copper coin / 699 / Belwit the Flat / The coin bears the likeness of Belwit the Flat, along with the inscriptions "One Zorkmid" and "699 GUE." On the other side, the coin depicts Egreth Castle, and says "In Frobs We Trust" in several languages.
1zm gold coin / ? / Dimwit Flathead / The coin pictures a man with an incredibly flat head, wearing a gaudy crown.
1zm coin (A) / 857 / Idwit Oogle Flathead / ?
1zm coin (B) / 657 / Idwit Oogle Flathead / This coin is most likely a misprint of the above.
5zm coin / ? / ? / This is a rare quint coin, similar in shape and size to an old Chinese coin, complete with a square hole in its center. Stamped on its face is "5 Zorkmids."
5zm bill / ? / ? / ?
10zm bill / ? / ? / ?
100zm bill / 719 / Dimwit Flathead / Each bill is worth 100 zorkmids, and bears the legend "In Frobs We Trust."
500zm gold coin / ? / ? / ?
500zm bill / ? / unknown person / ?
10000zm coin / 722 / Dimwit Flathead / This is a beautiful octagonal coin bearing the legends "Ten Thousand Zorkmids" and "In Frobs We Trust". (These were rare collector's items by the mid-tenth century.)
100000zm bill / ? / ? / The denomination of the bill is 100,000 zorkmids. Only one such bill was ever printed, and that was at the personal request of J. Pierpont Flathead.
silver coin / ? / Boat / This zorkmid is possibly an Obolus. These grow on zorkmid trees and have been used to buy passage to Ferryman's Isle.
TRIVIA:
The zorkmid is the substance of an age-old question, "Is a zorkmid truly the root of boll weevils?"
SOURCE(S): X ??? (for boll weevil question)
"A zorkmid saved is a zorkmid earned."-unknownThe gold zorkmid was the main unit of currency of the Great Underground Empire, and is still used in most parts of the kingdom today. Although zorkmids (as well as zorkies and zorkles) had been in circulation long before even the time of Duncanthranx, curtosey of the zorkmid tree, the first zorkmid was minted on Augur 16, 699 GUE, during the reign of Belwit the Flat. This coin bore the likeness of Belwit, the year, and the coin's value on the front, while on the back it had a picture of Egreth Castle, with the motto "In Frobs We Trust" stamped in two different languages.Since the steady supply of zorkmid coinage had gradually dwindled after the Zorkmid Blight, it was Belwit's creative and daring response to this economic crisis that saw the birth of the minted zorkmid, the stable currency upon which we still rely today. He took the radical step of ordering the minting of the first regular issue of Quendoran zorkmids. Although the order itself came down from the throne in 695, it was not until the third week in Augur, 699, that the mints underneath Egreth were finally readied to stamp the first coin with a design that had been four years in the making. Historians are still puzzled over the reasons behind Belwit's decision to begin the production of official, government-approved zorkmids. Regardless, it is clear that the advanced system of currency that was put into official use by Belwit was not in fact a new system.Zorkmids of different denominations were later issued, including the 500 and 10,000 zm coins, and the 100 zm bill. The largest issue was a 100,000 zm bill. Only one of these was ever printed, at the request of J. Pierpont Flathead.One of the biggest bafflements of history are the inscriptions found upon the 100zm bill and the 10,000 zm coin. The bill is dated in the year 719 GUE, and respectively, the coin in the year 722, but Dimwit Flathead, whose face appears on both, was not even born until the year 723. Countless theories have abounded in attempt to solve this mystery, the most popular being: it was an unauthentic forgery, a misprint, a different Flathead, or another case of time travel. A time travel device was once present in the Royal Museum, so time travel is definitely not out of the question, and perhaps should taken more seriously as the solution to this dilemma.Another bizarity is the "657 GUE" Idwit Oogle Flathead 1zm coin, which is indentical in every respect to the "857 GUE" coin save the date. Although a few historians suspect this as another case of time travel, this one is most likely due to a misprint.Other events linked to the zorkmid include the minting of thousands upon thousands of golden zorkmids for the sole purpose of being spent by the royal treasury in an effort to finance Dimwit's remarkable and excessive ceremonies and construction projects. It should also be noted that by the end of 789 GUE, every single zorkmid of commerce in the Great Underground Empire was controlled by FrobozzCo Inc.It may also be interesting to note that although the zorkmid was not being minted yet, by the end of the reign of Zylon the Aged (398 GUE), nearly every coin in the Quendor treasury bore the wizened likeness of him; no other king had held the throne in over three hundred years. 
Even in those days, the gold pieces minted in the distant age of Entharion the Wise and Mysterion the Brave were valuable collector’s items almost impossible to find.CONVERSION CHART1 zorkmid = 10 silver zorkles1 zorkimd = 100 copper zorkieZorkmid Change Machine (A) |
package session
import (
	"bytes"
	"context"
	"crypto/rand"
	"encoding/binary"
	"errors"
	"io"
	"io/ioutil"
	"net"
	"os"
	"time"

	"github.com/rs/zerolog"
	"github.com/rs/zerolog/log"

	"github.com/walkline/ToCloud9/apps/authserver/crypto/srp6"
	"github.com/walkline/ToCloud9/apps/authserver/repo"
	"github.com/walkline/ToCloud9/apps/authserver/service"
)
type Status uint8
const (
StatusChallenge Status = iota
StatusLogonProof
StatusReconnectProof
StatusAuthed
StatusClosed
)
type AuthResult uint8
const (
AuthResultSuccess AuthResult = 0
AuthResultBanned AuthResult = 3
AuthResultUnkAccount AuthResult = 4
AuthResultIncorrectPassword AuthResult = 5
AuthResultAlreadyOnline AuthResult = 6
AuthResultNoTime AuthResult = 7
)
type Command uint8
const (
CommandLogonChallenge Command = iota
CommandLogonProof
CommandReconnectChallenge
CommandReconnectProof
CommandRealmList Command = 0x10
)
type CommandData struct {
ValidStatus Status
Size int
}
var Commands = map[Command]Status{
CommandLogonChallenge: StatusChallenge,
CommandLogonProof: StatusLogonProof,
CommandReconnectChallenge: StatusChallenge,
CommandReconnectProof: StatusReconnectProof,
CommandRealmList: StatusAuthed,
}
type AuthSession struct {
logger zerolog.Logger
accountRepo repo.AccountRepo
realmService service.RealmService
conn net.Conn
srp *srp6.SRP6
status Status
account *repo.Account
reconnectProof []byte
}
func NewAuthSession(conn net.Conn, accountRepo repo.AccountRepo, realmService service.RealmService) *AuthSession {
return &AuthSession{
conn: conn,
logger: log.Logger.With().Str("address", conn.RemoteAddr().String()).Logger(),
accountRepo: accountRepo,
realmService: realmService,
}
}
func (s *AuthSession) ListenAndProcess() {
s.logger.Debug().Msg("New connection")
defer func(t time.Time) {
_ = s.conn.Close()
s.logger.Debug().Msgf("Socket closed. Session lifetime: %v.", time.Since(t))
}(time.Now())
opcode := make([]byte, 1)
for {
err := s.conn.SetDeadline(time.Now().UTC().Add(time.Minute * 2))
if err != nil {
s.logger.Error().Err(err).Msg("can't set deadline")
return
}
_, err = s.conn.Read(opcode)
if err != nil {
// not really an error at this stage
if errors.Is(err, io.EOF) {
return
} else if errors.Is(err, os.ErrDeadlineExceeded) {
s.logger.Debug().Msg("disconnect idle connection")
return
}
s.logger.Error().Err(err).Msg("can't read opcode")
return
}
validStatus, found := Commands[Command(opcode[0])]
if !found {
s.logger.Debug().Msgf("unk command 0x%X", opcode[0])
return
}
if s.status != validStatus {
_ = s.conn.Close()
s.logger.Error().Msgf("invalid status, cmd - 0x%X, expected - %d, have - %d", opcode[0], validStatus, s.status)
return
}
switch Command(opcode[0]) {
case CommandLogonChallenge:
err = s.HandleLogonChallenge()
case CommandLogonProof:
err = s.HandleLogonProof()
case CommandRealmList:
err = s.HandleRealmList()
case CommandReconnectChallenge:
err = s.HandleReconnectChallenge()
case CommandReconnectProof:
err = s.HandleReconnectProof()
}
if err != nil {
s.logger.Error().Err(err).Msgf("can't process command 0x%X", opcode[0])
_ = s.conn.Close()
return
}
}
}
func (s *AuthSession) HandleReconnectChallenge() error {
s.status = StatusClosed
type payload struct {
Err uint8
Size uint16
GameName [4]byte
Version1 uint8
Version2 uint8
Version3 uint8
Build uint16
Platform [4]byte
OS [4]byte
Country [4]byte
TimezoneBias uint32
IP uint32
ILen uint8
}
d := payload{}
err := binary.Read(s.conn, binary.LittleEndian, &d)
if err != nil {
return err
}
login := make([]byte, d.ILen)
_, err = s.conn.Read(login)
if err != nil {
return err
}
username := string(login)
s.logger = s.logger.With().Str("login", username).Logger()
s.logger.Debug().Interface("payload", &d).Msg("Received reconnect challenge")
s.account, err = s.accountRepo.AccountByUserName(context.TODO(), username)
if err != nil {
return err
}
s.reconnectProof = make([]byte, 16)
_, err = rand.Read(s.reconnectProof)
if err != nil {
return err
}
s.status = StatusReconnectProof
err = s.Write(
CommandReconnectChallenge,
AuthResultSuccess,
s.reconnectProof,
[]byte{0xBA, 0xA3, 0x1E, 0x99, 0xA0, 0x0B, 0x21, 0x57, 0xFC, 0x37, 0x3F, 0xB3, 0x69, 0xCD, 0xD2, 0xF1},
)
if err != nil {
return err
}
return nil
}
func (s *AuthSession) HandleReconnectProof() error {
s.logger.Debug().Msg("Handling reconnect proof")
type payload struct {
R1 [16]byte
R2 [20]byte
R3 [20]byte
NumberOfKeys uint8
}
d := payload{}
err := binary.Read(s.conn, binary.LittleEndian, &d)
if err != nil {
return err
}
if !srp6.ReconnectChallengeValid(s.account.Username, d.R1[:], d.R2[:], s.reconnectProof, s.account.SessionKeyAuth) {
return errors.New("received bad password during reconnect proof")
}
err = s.Write(
CommandReconnectProof,
AuthResultSuccess,
uint16(0),
)
if err != nil {
return err
}
s.status = StatusAuthed
return nil
}
func (s *AuthSession) HandleLogonChallenge() error {
s.status = StatusClosed
type payload struct {
Err uint8
Size uint16
GameName [4]byte
Version1 uint8
Version2 uint8
Version3 uint8
Build uint16
Platform [4]byte
OS [4]byte
Country [4]byte
TimezoneBias uint32
IP uint32
ILen uint8
}
d := payload{}
err := binary.Read(s.conn, binary.LittleEndian, &d)
if err != nil {
return err
}
login := make([]byte, d.ILen)
_, err = s.conn.Read(login)
if err != nil {
return err
}
username := string(login)
s.logger = s.logger.With().Str("login", username).Logger()
s.logger.Debug().Interface("payload", &d).Msg("Received login challenge")
s.account, err = s.accountRepo.AccountByUserName(context.TODO(), username)
if err != nil {
return err
}
if s.account == nil {
return s.Write(CommandLogonProof, AuthResultUnkAccount, uint16(0))
}
s.srp = srp6.NewSRP(string(login), s.account.Salt, s.account.Verifier)
B, g, N, _s := s.srp.DataForClient()
err = s.Write(
[]byte{byte(CommandLogonChallenge), 0},
AuthResultSuccess,
B,
byte(1),
g,
byte(32),
N,
_s,
[]byte{0xBA, 0xA3, 0x1E, 0x99, 0xA0, 0x0B, 0x21, 0x57, 0xFC, 0x37, 0x3F, 0xB3, 0x69, 0xCD, 0xD2, 0xF1},
byte(0),
)
if err != nil {
return err
}
s.status = StatusLogonProof
return nil
}
func (s *AuthSession) HandleLogonProof() error {
s.logger.Debug().Msg("Handling logon proof")
type payload struct {
A [32]byte
ClientM [20]byte
CRCHash [20]byte
NumberOfKeys uint8
SecurityFlags uint8
}
d := payload{}
err := binary.Read(s.conn, binary.LittleEndian, &d)
if err != nil {
return err
}
K := s.srp.VerifyChallengeResponse(d.A[:], d.ClientM[:])
if K == nil {
s.logger.Debug().Msg("Received bad password")
return s.Write(CommandLogonProof, AuthResultUnkAccount, uint16(0))
}
s.account.SessionKeyAuth = K
err = s.accountRepo.UpdateAccount(context.TODO(), s.account)
if err != nil {
return err
}
type responsePayload struct {
CMD uint8
Err uint8
M2 [20]byte
AccountFlags uint32
SurveyID uint32
LoginFlags uint16
}
response := responsePayload{
CMD: uint8(CommandLogonProof),
Err: 0,
M2: srp6.GetSessionVerifier(d.A[:], d.ClientM[:], K),
AccountFlags: 0x00800000,
SurveyID: 0,
LoginFlags: 0,
}
err = s.Write(&response)
if err != nil {
return err
}
s.status = StatusAuthed
return nil
}
func (s *AuthSession) HandleRealmList() error {
var realmList []service.RealmListItem
s.logger.Debug().Msg("Handling realm list")
defer func(t time.Time) {
s.logger.Debug().Interface("realms", realmList).Msgf("Processed realm list. Took %v time.", time.Since(t))
}(time.Now())
	// the client sends 4 bytes here that we don't use; read and discard them
unk := uint32(0)
err := binary.Read(s.conn, binary.LittleEndian, &unk)
if err != nil {
return err
}
realmList, err = s.realmService.RealmListForAccount(context.TODO(), s.account)
if err != nil {
return err
}
pkt := new(bytes.Buffer)
for _, realm := range realmList {
err = s.write(pkt,
realm.Icon,
realm.Locked,
realm.Flag,
realm.Name,
realm.Address,
realm.PopulationLevel,
realm.CharsCount,
realm.Timezone,
uint8(realm.ID),
)
if err != nil {
return err
}
}
err = s.write(pkt, uint8(0x10), uint8(0x00))
if err != nil {
return err
}
pktData, err := ioutil.ReadAll(pkt)
if err != nil {
return err
}
err = s.Write(
CommandRealmList,
uint16(len(pktData)+6),
uint32(0),
uint16(len(realmList)),
pktData,
)
if err != nil {
return err
}
s.status = StatusAuthed
return nil
}
func (s *AuthSession) Write(v ...interface{}) error {
return s.write(s.conn, v...)
}
func (s *AuthSession) write(writer io.Writer, v ...interface{}) error {
var err error
for i := range v {
d := v[i]
		switch v := d.(type) {
		case string:
			// strings are written as NUL-terminated byte sequences
			d = append([]byte(v), 0)
		}
err = binary.Write(writer, binary.LittleEndian, d)
if err != nil {
return err
}
}
return nil
}
|
‘One Person Company’ Under the Companies Act, 2013 — Journey From ‘Minimum Two Persons’ to ‘Only One Person’: A Critical Reappraisal
The One Person Company (hereinafter referred to as ‘OPC’) is of recent origin in the Indian legal system and is quite similar to the sole proprietorship form of business. The notion of an OPC is that of a ‘one man army’. This revolutionary concept aims to pave the way for a more modern and dynamic legislation, enabling growth and greater regulation of the corporate sector in India. The OPC provides a whole new bracket of opportunities for those who wish to start their own ventures within an organized business structure. This paper gives a basic understanding of the OPC and its concept. It dwells comprehensively upon the concept of the OPC, from its inception as philanthropy to its arrival at a statutory form, with special reference to the provisions of the Companies Act, 2013. |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Path Planning Sample Code with RRT*
author: AtsushiSakai(@Atsushi_twi)
with edits of <NAME>(@MaxMagazin)
"""
import random
import math
import copy
import numpy as np
import matplotlib.pyplot as plt
import time
import sys, select, termios, tty
class RRT():
def __init__(self, start, planDistance, obstacleList, expandDis=0.5, turnAngle=30, maxIter=400, rrtTargets = None):
self.start = Node(start[0], start[1], start[2])
self.startYaw = start[2]
self.planDistance = planDistance
self.expandDis = expandDis
self.turnAngle = math.radians(turnAngle)
self.maxDepth = int(planDistance / expandDis)
self.maxIter = 400 #maxIter
self.obstacleList = obstacleList
self.rrtTargets = rrtTargets
self.aboveMaxDistance = 0
self.belowMaxDistance = 0
self.collisionHit = 0
self.doubleNodeCount = 0
self.savedRandoms = []
def Planning(self, animation=False, interactive=False):
self.nodeList = [self.start]
self.leafNodes = []
for i in range(self.maxIter):
rnd = self.get_random_point_from_target_list()
nind = self.GetNearestListIndex(self.nodeList, rnd)
nearestNode = self.nodeList[nind]
if (nearestNode.cost >= self.planDistance):
continue
newNode = self.steerConstrained(rnd, nind)
if newNode in self.nodeList:
continue
if self.__CollisionCheck(newNode, self.obstacleList):
self.nodeList.append(newNode)
if (newNode.cost >= self.planDistance):
self.leafNodes.append(newNode)
if animation:
self.DrawSample(rnd)
if interactive:
key = self.getKey()
if (key == '\x03'): #CTRL+C
break
return self.nodeList, self.leafNodes
    def getKey(self):
        # save the current terminal settings so they can be restored after the raw read
        settings = termios.tcgetattr(sys.stdin)
        tty.setraw(sys.stdin.fileno())
        select.select([sys.stdin], [], [], 0)
        key = sys.stdin.read(1)
        termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
        return key
def choose_parent(self, newNode, nearinds):
if len(nearinds) == 0:
return newNode
dlist = []
for i in nearinds:
dx = newNode.x - self.nodeList[i].x
dy = newNode.y - self.nodeList[i].y
d = math.sqrt(dx ** 2 + dy ** 2)
theta = math.atan2(dy, dx)
if self.check_collision_extend(self.nodeList[i], theta, d):
dlist.append(self.nodeList[i].cost + d)
else:
dlist.append(float("inf"))
mincost = min(dlist)
minind = nearinds[dlist.index(mincost)]
if mincost == float("inf"):
print("mincost is inf")
return newNode
newNode.cost = mincost
newNode.parent = minind
return newNode
def steerConstrained(self, rnd, nind):
nearestNode = self.nodeList[nind]
theta = math.atan2(rnd[1] - nearestNode.y, rnd[0] - nearestNode.x)
angleChange = self.pi_2_pi(theta - nearestNode.yaw)
angle30degree = math.radians(30)
if angleChange > angle30degree:
angleChange = self.turnAngle
elif angleChange >= -angle30degree:
angleChange = 0
else:
angleChange = -self.turnAngle
newNode = copy.deepcopy(nearestNode)
newNode.yaw += angleChange
newNode.x += self.expandDis * math.cos(newNode.yaw)
newNode.y += self.expandDis * math.sin(newNode.yaw)
newNode.cost += self.expandDis
newNode.parent = nind
return newNode
def pi_2_pi(self, angle):
return (angle + math.pi) % (2*math.pi) - math.pi
def steer(self, rnd, nind):
nearestNode = self.nodeList[nind]
theta = math.atan2(rnd[1] - nearestNode.y, rnd[0] - nearestNode.x)
newNode = copy.deepcopy(nearestNode)
newNode.x += self.expandDis * math.cos(theta)
newNode.y += self.expandDis * math.sin(theta)
newNode.cost += self.expandDis
newNode.parent = nind
return newNode
def get_random_point(self):
randX = random.uniform(0, self.planDistance)
randY = random.uniform(-self.planDistance, self.planDistance)
rnd = [randX, randY]
car_rot_mat = np.array([[math.cos(self.startYaw), -math.sin(self.startYaw)], [math.sin(self.startYaw), math.cos(self.startYaw)]])
rotatedRnd = np.dot(car_rot_mat, rnd)
rotatedRnd = [rotatedRnd[0] + self.start.x, rotatedRnd[1] + self.start.y]
return rotatedRnd
def get_random_point_from_target_list(self):
maxTargetAroundDist = 3
if not self.rrtTargets:
return self.get_random_point()
targetId = np.random.randint(len(self.rrtTargets))
x, y, oSize = self.rrtTargets[targetId]
randAngle = random.uniform(0, 2 * math.pi)
randDist = random.uniform(oSize, maxTargetAroundDist)
finalRnd = [x + randDist * math.cos(randAngle), y + randDist * math.sin(randAngle)]
return finalRnd
def get_best_last_index(self):
disglist = [self.calc_dist_to_goal(
node.x, node.y) for node in self.nodeList]
goalinds = [disglist.index(i) for i in disglist if i <= self.expandDis]
if len(goalinds) == 0:
return None
mincost = min([self.nodeList[i].cost for i in goalinds])
for i in goalinds:
if self.nodeList[i].cost == mincost:
return i
return None
def gen_final_course(self, goalind):
path = [[self.end.x, self.end.y]]
while self.nodeList[goalind].parent is not None:
node = self.nodeList[goalind]
path.append([node.x, node.y])
goalind = node.parent
path.append([self.start.x, self.start.y])
return path
def calc_dist_to_goal(self, x, y):
return np.linalg.norm([x - self.end.x, y - self.end.y])
def find_near_nodes(self, newNode):
nnode = len(self.nodeList)
r = self.expandDis * 3.0
dlist = [(node.x - newNode.x) ** 2 +
(node.y - newNode.y) ** 2 for node in self.nodeList]
nearinds = [dlist.index(i) for i in dlist if i <= r ** 2]
return nearinds
def rewire(self, newNode, nearinds):
nnode = len(self.nodeList)
for i in nearinds:
nearNode = self.nodeList[i]
dx = newNode.x - nearNode.x
dy = newNode.y - nearNode.y
d = math.sqrt(dx ** 2 + dy ** 2)
scost = newNode.cost + d
if nearNode.cost > scost:
theta = math.atan2(dy, dx)
if self.check_collision_extend(nearNode, theta, d):
nearNode.parent = nnode - 1
nearNode.cost = scost
def check_collision_extend(self, nearNode, theta, d):
tmpNode = copy.deepcopy(nearNode)
for i in range(int(d / self.expandDis)):
tmpNode.x += self.expandDis * math.cos(theta)
tmpNode.y += self.expandDis * math.sin(theta)
if not self.__CollisionCheck(tmpNode, self.obstacleList):
return False
return True
def DrawSample(self, rnd=None):
plt.clf()
if rnd is not None:
plt.plot(rnd[0], rnd[1], "^k")
for node in self.nodeList:
if node.parent is not None:
plt.plot([node.x, self.nodeList[node.parent].x], [
node.y, self.nodeList[node.parent].y], "-g")
axes = plt.gca()
for (ox, oy, size) in self.obstacleList:
circle = plt.Circle((ox,oy), radius=size)
axes.add_patch(circle)
plt.plot(self.start.x, self.start.y, "xr")
axes = plt.gca()
xmin, xmax, ymin, ymax = -5, 25, -20, 20
axes.set_xlim([xmin,xmax])
axes.set_ylim([ymin,ymax])
plt.grid(True)
plt.pause(0.001)
def DrawGraph(self):
ax = plt.gca()
for (ox, oy, size) in self.obstacleList:
circle = plt.Circle((ox,oy), radius=size)
ax.add_patch(circle)
for node in self.nodeList:
if node.parent is not None:
plt.plot([node.x, self.nodeList[node.parent].x], [
node.y, self.nodeList[node.parent].y], "-g")
plt.axis([-5, 45, -20, 20])
plt.axis("equal")
plt.grid(True)
plt.pause(0.01)
def GetNearestListIndex(self, nodeList, rnd):
dlist = [(node.x - rnd[0]) ** 2 + (node.y - rnd[1]) ** 2 for node in nodeList]
minind = dlist.index(min(dlist))
return minind
def __CollisionCheck(self, node, obstacleList):
for (ox, oy, size) in obstacleList:
dx = ox - node.x
dy = oy - node.y
d = dx * dx + dy * dy
if d <= size ** 2:
return False
return True
class Node():
"""
RRT Node
"""
def __init__(self, x, y, yaw):
self.x = x
self.y = y
self.yaw = yaw
self.cost = 0.0
self.parent = None
def __str__(self):
return str(round(self.x, 2)) + "," + str(round(self.y,2)) + "," + str(math.degrees(self.yaw)) + "," + str(self.cost)
def __eq__(self, other):
return self.x == other.x and self.y == other.y and self.yaw == other.yaw and self.cost == other.cost
def __repr__(self):
return str(self)
def main():
    print("Start rrt planning!")
    start = [0.0, 0.0, math.radians(0.0)]
    planDistance = 10
    iterationNumber = 400
    # obstacles as (x, y, radius) tuples; left empty here so the sample runs standalone
    obstacleList = []
    rrtConeTargets = []
    rrt = RRT(start, planDistance, obstacleList=obstacleList, expandDis=1, maxIter=iterationNumber, rrtTargets=rrtConeTargets)
    rrt.Planning(True, True)
if __name__ == '__main__':
main()
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.spi.discovery.tcp;
import org.apache.ignite.*;
import org.apache.ignite.configuration.*;
import org.apache.ignite.internal.*;
import org.apache.ignite.internal.util.typedef.*;
import org.apache.ignite.internal.util.typedef.internal.*;
import org.apache.ignite.spi.discovery.tcp.ipfinder.*;
import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.*;
import org.apache.ignite.testframework.*;
import org.apache.ignite.testframework.junits.common.*;
import java.util.concurrent.*;
import java.util.concurrent.atomic.*;
import static org.apache.ignite.events.EventType.*;
/**
* Test for {@link TcpDiscoverySpi}.
*/
public class TcpDiscoveryMultiThreadedTest extends GridCommonAbstractTest {
/** */
private static final int GRID_CNT = 5;
/** */
private static final int CLIENT_GRID_CNT = 5;
/** */
private static final ThreadLocal<Boolean> clientFlagPerThread = new ThreadLocal<>();
/** */
private static volatile boolean clientFlagGlobal;
/**
* @return Client node flag.
*/
private static boolean client() {
Boolean client = clientFlagPerThread.get();
return client != null ? client : clientFlagGlobal;
}
/** */
private TcpDiscoveryIpFinder ipFinder = new TcpDiscoveryVmIpFinder(true);
/**
* @throws Exception If fails.
*/
public TcpDiscoveryMultiThreadedTest() throws Exception {
super(false);
}
/** {@inheritDoc} */
@SuppressWarnings({"IfMayBeConditional"})
@Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
IgniteConfiguration cfg = super.getConfiguration(gridName);
if (client())
cfg.setClientMode(true);
cfg.setDiscoverySpi(new TcpDiscoverySpi().setIpFinder(ipFinder));
cfg.setCacheConfiguration();
cfg.setIncludeEventTypes(EVT_TASK_FAILED, EVT_TASK_FINISHED, EVT_JOB_MAPPED);
cfg.setIncludeProperties();
return cfg;
}
/** {@inheritDoc} */
@Override protected void afterTest() throws Exception {
stopAllGrids();
super.afterTest();
}
/** {@inheritDoc} */
@Override protected long getTestTimeout() {
return 3 * 60 * 1000;
}
/**
* @throws Exception If any error occurs.
*/
public void testMultiThreadedClientsRestart() throws Exception {
clientFlagGlobal = false;
info("Test timeout: " + (getTestTimeout() / (60 * 1000)) + " min.");
startGridsMultiThreaded(GRID_CNT);
clientFlagGlobal = true;
startGridsMultiThreaded(GRID_CNT, CLIENT_GRID_CNT);
final AtomicBoolean done = new AtomicBoolean();
final AtomicInteger clientIdx = new AtomicInteger(GRID_CNT);
IgniteInternalFuture<?> fut1 = multithreadedAsync(
new Callable<Object>() {
@Override public Object call() throws Exception {
clientFlagPerThread.set(true);
int idx = clientIdx.getAndIncrement();
while (!done.get()) {
stopGrid(idx, true);
startGrid(idx);
}
return null;
}
},
CLIENT_GRID_CNT
);
Thread.sleep(getTestTimeout() - 60 * 1000);
done.set(true);
fut1.get();
}
/**
* @throws Exception If any error occurs.
*/
public void testMultiThreadedClientsServersRestart() throws Exception {
fail("https://issues.apache.org/jira/browse/IGNITE-1123");
clientFlagGlobal = false;
info("Test timeout: " + (getTestTimeout() / (60 * 1000)) + " min.");
startGridsMultiThreaded(GRID_CNT);
clientFlagGlobal = true;
startGridsMultiThreaded(GRID_CNT, CLIENT_GRID_CNT);
final AtomicBoolean done = new AtomicBoolean();
final AtomicInteger clientIdx = new AtomicInteger(GRID_CNT);
IgniteInternalFuture<?> fut1 = multithreadedAsync(
new Callable<Object>() {
@Override public Object call() throws Exception {
clientFlagPerThread.set(true);
int idx = clientIdx.getAndIncrement();
while (!done.get()) {
stopGrid(idx);
startGrid(idx);
}
return null;
}
},
CLIENT_GRID_CNT
);
final BlockingQueue<Integer> srvIdx = new LinkedBlockingQueue<>();
for (int i = 0; i < GRID_CNT; i++)
srvIdx.add(i);
IgniteInternalFuture<?> fut2 = multithreadedAsync(
new Callable<Object>() {
@Override public Object call() throws Exception {
clientFlagPerThread.set(false);
while (!done.get()) {
int idx = srvIdx.take();
stopGrid(idx);
startGrid(idx);
srvIdx.add(idx);
}
return null;
}
},
GRID_CNT - 1
);
Thread.sleep(getTestTimeout() - 60 * 1000);
done.set(true);
fut1.get();
fut2.get();
}
/**
* @throws Exception If any error occurs.
*/
public void testTopologyVersion() throws Exception {
clientFlagGlobal = false;
startGridsMultiThreaded(GRID_CNT);
long prev = 0;
for (Ignite g : G.allGrids()) {
IgniteKernal kernal = (IgniteKernal)g;
long ver = kernal.context().discovery().topologyVersion();
info("Top ver: " + ver);
            if (prev == 0)
                prev = ver;
            else
                // every node should observe the same topology version
                assertEquals(prev, ver);
        }
info("Test finished.");
}
/**
* @throws Exception If any error occurs.
*/
public void testMultipleStartOnCoordinatorStop() throws Exception{
clientFlagGlobal = false;
startGrids(GRID_CNT);
final CyclicBarrier barrier = new CyclicBarrier(GRID_CNT + 4);
final AtomicInteger startIdx = new AtomicInteger(GRID_CNT);
IgniteInternalFuture<?> fut = GridTestUtils.runMultiThreadedAsync(new Callable<Object>() {
@Override public Object call() throws Exception {
barrier.await();
Ignite ignite = startGrid(startIdx.getAndIncrement());
assertFalse(ignite.configuration().isClientMode());
log.info("Started node: " + ignite.name());
return null;
}
}, GRID_CNT + 3, "start-thread");
barrier.await();
U.sleep(ThreadLocalRandom.current().nextInt(10, 100));
for (int i = 0; i < GRID_CNT; i++)
stopGrid(i);
fut.get();
}
}
|
<reponame>ankuralld5999/LeetCode-Problems
// Problem: https://leetcode.com/problems/flatten-binary-tree-to-linked-list
// Author: github.com/ankuralld5999
// Time: O(N)
// Space: O(logN)
class Solution {
private:
    // Flattens the subtree rooted at `root` in place and returns the tail
    // (right-most node) of the flattened list, or NULL for an empty subtree.
    TreeNode* helper(TreeNode *root) {
        if (!root) return NULL;
        auto leftEnd = helper(root->left), rightEnd = helper(root->right), right = root->right;
        // move the flattened left subtree into the right slot
        root->right = root->left;
        root->left = NULL;
        // append the original right subtree after the flattened left list
        if (leftEnd) leftEnd->right = right;
        else root->right = right;
        return rightEnd ? rightEnd : (leftEnd ? leftEnd : root);
    }
public:
void flatten(TreeNode* root) {
helper(root);
}
};
|
Prevention of perinatal transmission of hepatitis B virus (HBV): A comparison of two prophylactic schedules
Perinatal transmission of hepatitis B virus (HBV) from HBsAg carrier mothers who were HBeAg+, antiHBe+, or negative for both HBe markers was interrupted using either 4 doses of vaccine, or one dose of hepatitis B immunoglobulin (HBIG) at birth combined with 4 doses of vaccine. In those infants who received HBIG at birth, the antiHBs titre was significantly higher at 1 and 2 months of age, but at 6, 9, and 18 months there was no significant difference. |
// ValidChecksum checks whether the given checksum corresponds to the given address.
func ValidChecksum(address Hash, checksum Trytes) error {
actualChecksum, err := Checksum(address)
if err != nil {
return err
}
if checksum != actualChecksum {
return ErrInvalidChecksum
}
return nil
} |
def _ignore_line(match_obj):
    # remember the original matched text so it can be restored later
    nonlocal ignored
    ignored.append(match_obj.group(0))
    # replace the match with a numbered placeholder label
    nonlocal count
    old_count = count
    count += 1
    return LABEL_PREFIX + str(old_count) + LABEL_SUFFIX |
<gh_stars>100-1000
//
// SDLOnSystemCapabilityUpdated.h
// SmartDeviceLink
//
// Created by Nicole on 2/6/19.
// Copyright © 2019 smartdevicelink. All rights reserved.
//
#import "SDLRPCNotification.h"
@class SDLSystemCapability;
NS_ASSUME_NONNULL_BEGIN
/**
* A notification to inform the connected device that a specific system capability has changed.
*/
@interface SDLOnSystemCapabilityUpdated : SDLRPCNotification
/**
* Convenience init for required parameters
*
* @param systemCapability The system capability that has been updated
* @return A SDLOnSystemCapabilityUpdated object
*/
- (instancetype)initWithSystemCapability:(SDLSystemCapability *)systemCapability;
/**
* The system capability that has been updated.
*
* SDLSystemCapability, Required
*/
@property (strong, nonatomic) SDLSystemCapability *systemCapability;
@end
NS_ASSUME_NONNULL_END
|
/**
* An isolated V8Runtime. All JavaScript execution must exist
* on a single runtime, and data is not shared between runtimes.
* A runtime must be created and released when finished.
*
* All access to a runtime must come from the same thread, unless
* the thread explicitly gives up control using the V8Locker.
*
* A public static factory method can be used to create the runtime.
*
* V8 runtime = V8.createV8Runtime();
*
*/
public class V8 extends V8Object {
private static Object lock = new Object();
private volatile static int runtimeCounter = 0;
private static String v8Flags = null;
private static boolean initialized = false;
protected Map<Long, V8Value> v8WeakReferences = new HashMap<Long, V8Value>();
private Map<String, Object> data = null;
private final V8Locker locker;
private SignatureProvider signatureProvider = null;
private long objectReferences = 0;
private long v8RuntimePtr = 0;
private List<Releasable> resources = null;
private V8Map<V8Executor> executors = null;
private boolean forceTerminateExecutors = false;
private Map<Long, MethodDescriptor> functionRegistry = new HashMap<Long, MethodDescriptor>();
private LinkedList<ReferenceHandler> referenceHandlers = new LinkedList<ReferenceHandler>();
private LinkedList<V8Runnable> releaseHandlers = new LinkedList<V8Runnable>();
private static boolean nativeLibraryLoaded = false;
private static Error nativeLoadError = null;
private static Exception nativeLoadException = null;
private static V8Value undefined = new Undefined();
private static Object invalid = new Object();
private class MethodDescriptor {
Object object;
Method method;
JavaCallback callback;
JavaVoidCallback voidCallback;
boolean includeReceiver;
}
private synchronized static void load(final String tmpDirectory) {
try {
LibraryLoader.loadLibrary(tmpDirectory);
nativeLibraryLoaded = true;
} catch (Error e) {
nativeLoadError = e;
} catch (Exception e) {
nativeLoadException = e;
}
}
/**
* Determines if the native libraries are loaded.
*
* @return Returns true if the native libraries are loaded,
* false otherwise.
*/
public static boolean isLoaded() {
return nativeLibraryLoaded;
}
/**
* Sets the V8 flags on the platform. All runtimes will be created
* with the same flags. Flags must be set before the runtime is
* created.
*
* @param flags The flags to set on V8
*/
public static void setFlags(final String flags) {
v8Flags = flags;
initialized = false;
}
/**
* Creates a new V8Runtime and loads the required
* native libraries if they are not already loaded.
* The current thread is given the lock to this runtime.
*
* @return A new isolated V8 Runtime.
*/
public static V8 createV8Runtime() {
return createV8Runtime(null, null);
}
/**
* Creates a new V8Runtime and loads the required native libraries if they
* are not already loaded. An alias is also set for the global scope. For example,
* 'window' can be set as the global scope name.
*
* The current thread is given the lock to this runtime.
*
* @param globalAlias The name to associate with the global scope.
*
* @return A new isolated V8 Runtime.
*/
public static V8 createV8Runtime(final String globalAlias) {
return createV8Runtime(globalAlias, null);
}
/**
* Creates a new V8Runtime and loads the required native libraries if they
* are not already loaded. An alias is also set for the global scope. For example,
* 'window' can be set as the global scope name.
*
* The current thread is given the lock to this runtime.
*
* @param globalAlias The name to associate with the global scope.
* @param tempDirectory The name of the directory to extract the native
     * libraries to.
*
* @return A new isolated V8 Runtime.
*/
public static V8 createV8Runtime(final String globalAlias, final String tempDirectory) {
if (!nativeLibraryLoaded) {
synchronized (lock) {
if (!nativeLibraryLoaded) {
load(tempDirectory);
}
}
}
checkNativeLibraryLoaded();
if (!initialized) {
_setFlags(v8Flags);
initialized = true;
}
V8 runtime = new V8(globalAlias);
synchronized (lock) {
runtimeCounter++;
}
return runtime;
}
public void setSignatureProvider(final SignatureProvider signatureProvider) {
this.signatureProvider = signatureProvider;
}
/**
* Adds a ReferenceHandler to track when new V8Objects are created.
*
* @param handler The ReferenceHandler to add
*/
public void addReferenceHandler(final ReferenceHandler handler) {
referenceHandlers.add(0, handler);
}
/**
* Adds a handler that will be called when the runtime is being released.
* The runtime will still be available when the handler is executed.
*
     * @param handler The handler to invoke when the runtime is being released
*/
public void addReleaseHandler(final V8Runnable handler) {
releaseHandlers.add(handler);
}
/**
* Removes an existing ReferenceHandler from the collection of reference handlers.
* If the ReferenceHandler does not exist in the collection, it is ignored.
*
* @param handler The reference handler to remove
*/
public void removeReferenceHandler(final ReferenceHandler handler) {
referenceHandlers.remove(handler);
}
/**
* Removes an existing release handler from the collection of release handlers.
* If the release handler does not exist in the collection, it is ignored.
*
* @param handler The handler to remove
*/
public void removeReleaseHandler(final V8Runnable handler) {
releaseHandlers.remove(handler);
}
/**
* Associates an arbitrary object with this runtime.
*
* @param key The key used to reference this object
* @param value The object to associate with this runtime
*/
public synchronized void setData(final String key, final Object value) {
if (data == null) {
data = new HashMap<String, Object>();
}
data.put(key, value);
}
/**
* Returns the data object associated with this runtime, null if no object
* has been associated.
*
* @param key The key used to reference this object
*
* @return The data object associated with this runtime, or null.
*/
public Object getData(final String key) {
if (data == null) {
return null;
}
return data.get(key);
}
private void notifyReleaseHandlers(final V8 runtime) {
for (V8Runnable handler : releaseHandlers) {
handler.run(runtime);
}
}
private void notifyReferenceCreated(final V8Value object) {
for (ReferenceHandler referenceHandler : referenceHandlers) {
referenceHandler.v8HandleCreated(object);
}
}
private void notifyReferenceDisposed(final V8Value object) {
for (ReferenceHandler referenceHandler : referenceHandlers) {
referenceHandler.v8HandleDisposed(object);
}
}
private static void checkNativeLibraryLoaded() {
if (!nativeLibraryLoaded) {
String vendorName = LibraryLoader.computeLibraryShortName(true);
String baseName = LibraryLoader.computeLibraryShortName(false);
String message = "J2V8 native library not loaded (" + baseName + "/" + vendorName + ")";
if (nativeLoadError != null) {
throw new IllegalStateException(message, nativeLoadError);
} else if (nativeLoadException != null) {
throw new IllegalStateException(message, nativeLoadException);
} else {
throw new IllegalStateException(message);
}
}
}
protected V8() {
this(null);
}
protected V8(final String globalAlias) {
super(null);
released = false;
v8RuntimePtr = _createIsolate(globalAlias);
locker = new V8Locker(this);
checkThread();
objectHandle = _getGlobalObject(v8RuntimePtr);
}
public long createInspector(final V8InspectorDelegate inspectorDelegate, final String contextName) {
return _createInspector(v8RuntimePtr, inspectorDelegate, contextName);
}
public void dispatchProtocolMessage(final long V8InspectorPtr, final String protocolMessage) {
checkThread();
_dispatchProtocolMessage(v8RuntimePtr, V8InspectorPtr, protocolMessage);
}
public void schedulePauseOnNextStatement(final long V8InspectorPtr, final String reason) {
checkThread();
_schedulePauseOnNextStatement(v8RuntimePtr, V8InspectorPtr, reason);
}
/**
* Returns an UNDEFINED constant.
*
* @return The UNDEFINED constant value.
*/
public static V8Value getUndefined() {
return undefined;
}
/**
* Returns the number of active runtimes.
*
* @return The number of active runtimes.
*/
public static int getActiveRuntimes() {
return runtimeCounter;
}
/**
* Returns the number of Object References for this runtime.
*
* @return The number of Object References on this runtime.
*/
public long getObjectReferenceCount() {
return objectReferences - v8WeakReferences.size();
}
protected long getV8RuntimePtr() {
return v8RuntimePtr;
}
/**
* Gets the version of the V8 engine
*
* @return The version of the V8 Engine.
*/
public static String getV8Version() {
return _getVersion();
}
/**
* Returns the revision ID of this version as specified
* by the source code management system. Currently we use
* Git, so this will return the commit ID for this revision.
*
* @return The revision ID of this version of J2V8
*/
public static String getSCMRevision() {
return "Unknown revision ID";
}
/*
* (non-Javadoc)
* @see com.eclipsesource.v8.V8Value#close()
*/
@Override
public void close() {
release(true);
}
/*
* (non-Javadoc)
* @see com.eclipsesource.v8.V8Value#release()
*/
@Override
@Deprecated
public void release() {
release(true);
}
/**
* Terminates any JavaScript executing on this runtime. Once
* the runtime is released, any executors that were spawned
* will also be force terminated.
*/
public void terminateExecution() {
forceTerminateExecutors = true;
terminateExecution(v8RuntimePtr);
}
/**
* Release native resources associated with this runtime. Once
* released, a runtime cannot be reused.
*
* @param reportMemoryLeaks True if memory leaks should be
* reported by throwing an IllegalStateException if any
* objects were not released.
*/
public void release(final boolean reportMemoryLeaks) {
if (isReleased()) {
return;
}
checkThread();
try {
notifyReleaseHandlers(this);
} finally {
releaseResources();
shutdownExecutors(forceTerminateExecutors);
if (executors != null) {
executors.clear();
}
releaseNativeMethodDescriptors();
synchronized (lock) {
runtimeCounter--;
}
_releaseRuntime(v8RuntimePtr);
v8RuntimePtr = 0L;
released = true;
if (reportMemoryLeaks && (getObjectReferenceCount() > 0)) {
throw new IllegalStateException(getObjectReferenceCount() + " Object(s) still exist in runtime");
}
}
}
private void releaseNativeMethodDescriptors() {
Set<Long> nativeMethodDescriptors = functionRegistry.keySet();
for (Long nativeMethodDescriptor : nativeMethodDescriptors) {
releaseMethodDescriptor(v8RuntimePtr, nativeMethodDescriptor);
}
}
private void releaseResources() {
if (resources != null) {
for (Releasable releasable : resources) {
releasable.release();
}
resources.clear();
resources = null;
}
}
/**
* Registers an executor with this runtime. An executor is another
* runtime with its own thread. By registering an executor, it can be
* terminated when this runtime is released.
*
* @param key The key to associate the executor with.
* @param executor The executor itself.
*/
public void registerV8Executor(final V8Object key, final V8Executor executor) {
checkThread();
if (executors == null) {
executors = new V8Map<V8Executor>();
}
executors.put(key, executor);
}
/**
* Removes the executor from this runtime. The executor is
* *NOT* shutdown, simply removed from the list of known
* executors.
*
* @param key The key the executor was associated with.
* @return The executor or null if it does not exist.
*/
public V8Executor removeExecutor(final V8Object key) {
checkThread();
if (executors == null) {
return null;
}
return executors.remove(key);
}
/**
* Returns the executor associated with the given key.
*
* @param key The key the executor was associated with.
* @return The executor or null if it does not exist.
*/
public V8Executor getExecutor(final V8Object key) {
checkThread();
if (executors == null) {
return null;
}
return executors.get(key);
}
/**
* Shutdown all executors associated with this runtime.
* If force terminate is specified, it will forcefully terminate
* the executors, otherwise it will simply signal that they
* should terminate.
*
* @param forceTerminate Specify if the executors should be
* forcefully terminated, or simply notified to shutdown when ready.
*/
public void shutdownExecutors(final boolean forceTerminate) {
checkThread();
if (executors == null) {
return;
}
for (V8Executor executor : executors.values()) {
if (forceTerminate) {
executor.forceTermination();
} else {
executor.shutdown();
}
}
}
/**
* Registers a resource with this runtime. All registered
* resources will be released before the runtime is released.
*
* @param resource The resource to register.
*/
public void registerResource(final Releasable resource) {
checkThread();
if (resources == null) {
resources = new ArrayList<Releasable>();
}
resources.add(resource);
}
/**
* Executes a JS Script on this runtime and returns the result as an integer.
* If the result is not an integer, then a V8ResultUndefinedException is thrown.
*
* @param script The script to execute.
*
* @return The result of the script as an integer, or V8ResultUndefinedException if
* the result is not an integer.
*/
public int executeIntegerScript(final String script) {
return executeIntegerScript(script, null, 0);
}
/**
* Executes a JS Script on this runtime and returns the result as an integer.
* If the result is not an integer, then a V8ResultUndefinedException is thrown.
*
* @param script The script to execute.
* @param scriptName The name of the script
* @param lineNumber The line number that is considered to be the first line of
     * the script. Typically 0, but could be set to another value for exception stack trace purposes.
*
* @return The result of the script as an integer, or V8ResultUndefinedException if
* the result is not an integer.
*/
public int executeIntegerScript(final String script, final String scriptName, final int lineNumber) {
checkThread();
checkScript(script);
return executeIntegerScript(v8RuntimePtr, script, scriptName, lineNumber);
}
protected void createTwin(final V8Value value, final V8Value twin) {
checkThread();
createTwin(v8RuntimePtr, value.getHandle(), twin.getHandle());
}
/**
* Executes a JS Script on this runtime and returns the result as a double.
* If the result is not a double, then a V8ResultUndefinedException is thrown.
*
* @param script The script to execute.
*
* @return The result of the script as a double, or V8ResultUndefinedException if
* the result is not a double.
*/
public double executeDoubleScript(final String script) {
return executeDoubleScript(script, null, 0);
}
/**
* Executes a JS Script on this runtime and returns the result as a double.
* If the result is not a double, then a V8ResultUndefinedException is thrown.
*
* @param script The script to execute.
* @param scriptName The name of the script
* @param lineNumber The line number that is considered to be the first line of
* the script. Typically 0, but could be set to another value for exception stack trace purposes.
*
* @return The result of the script as a double, or V8ResultUndefinedException if
* the result is not a double.
*/
public double executeDoubleScript(final String script, final String scriptName, final int lineNumber) {
checkThread();
checkScript(script);
return executeDoubleScript(v8RuntimePtr, script, scriptName, lineNumber);
}
/**
* Executes a JS Script on this runtime and returns the result as a String.
* If the result is not a String, then a V8ResultUndefinedException is thrown.
*
* @param script The script to execute.
*
* @return The result of the script as a String, or V8ResultUndefinedException if
* the result is not a String.
*/
public String executeStringScript(final String script) {
return executeStringScript(script, null, 0);
}
/**
* Executes a JS Script on this runtime and returns the result as a String.
* If the result is not a String, then a V8ResultUndefinedException is thrown.
*
* @param script The script to execute.
* @param scriptName The name of the script
* @param lineNumber The line number that is considered to be the first line of
* the script. Typically 0, but could be set to another value for exception stack trace purposes.
*
* @return The result of the script as a String, or V8ResultUndefinedException if
* the result is not a String.
*/
public String executeStringScript(final String script, final String scriptName, final int lineNumber) {
checkThread();
checkScript(script);
return executeStringScript(v8RuntimePtr, script, scriptName, lineNumber);
}
/**
* Executes a JS Script on this runtime and returns the result as a boolean.
* If the result is not a boolean, then a V8ResultUndefinedException is thrown.
*
* @param script The script to execute.
*
* @return The result of the script as a boolean, or V8ResultUndefinedException if
* the result is not a boolean.
*/
public boolean executeBooleanScript(final String script) {
return executeBooleanScript(script, null, 0);
}
/**
* Executes a JS Script on this runtime and returns the result as a boolean.
* If the result is not a boolean, then a V8ResultUndefinedException is thrown.
*
* @param script The script to execute.
* @param scriptName The name of the script
* @param lineNumber The line number that is considered to be the first line of
* the script. Typically 0, but could be set to another value for exception stack trace purposes.
*
* @return The result of the script as a boolean, or V8ResultUndefinedException if
* the result is not a boolean.
*/
public boolean executeBooleanScript(final String script, final String scriptName, final int lineNumber) {
checkThread();
checkScript(script);
return executeBooleanScript(v8RuntimePtr, script, scriptName, lineNumber);
}
/**
* Executes a JS Script on this runtime and returns the result as a V8Array.
* If the result is not a V8Array, then a V8ResultUndefinedException is thrown.
*
* @param script The script to execute.
*
* @return The result of the script as a V8Array, or V8ResultUndefinedException if
* the result is not a V8Array.
*/
public V8Array executeArrayScript(final String script) {
return executeArrayScript(script, null, 0);
}
/**
* Executes a JS Script on this runtime and returns the result as a V8Array.
* If the result is not a V8Array, then a V8ResultUndefinedException is thrown.
*
* @param script The script to execute.
* @param scriptName The name of the script
* @param lineNumber The line number that is considered to be the first line of
* the script. Typically 0, but could be set to another value for exception stack trace purposes.
*
* @return The result of the script as a V8Array, or V8ResultUndefinedException if
* the result is not a V8Array.
*/
public V8Array executeArrayScript(final String script, final String scriptName, final int lineNumber) {
checkThread();
Object result = this.executeScript(script, scriptName, lineNumber);
if (result instanceof V8Array) {
return (V8Array) result;
}
throw new V8ResultUndefined();
}
/**
* Executes a JS Script on this runtime and returns the result as a Java Object.
* Primitives will be boxed.
*
* @param script The script to execute.
*
* @return The result of the script as a Java Object.
*/
public Object executeScript(final String script) {
return executeScript(script, null, 0);
}
/**
* Executes a JS Script on this runtime and returns the result as a Java Object.
* Primitives will be boxed.
*
* @param script The script to execute.
* @param uri The name of the script
*
* @return The result of the script as a Java Object.
*/
public Object executeScript(final String script, final String uri) {
checkThread();
checkScript(script);
return executeScript(getV8RuntimePtr(), UNKNOWN, script, uri, 0);
}
/**
* Executes a JS Script on this runtime and returns the result as a Java Object.
* Primitives will be boxed.
*
* @param script The script to execute.
* @param uri The name of the script
* @param lineNumber The line number that is considered to be the first line of
* the script. Typically 0, but could be set to another value for exception stack trace purposes.
*
* @return The result of the script as a Java Object.
*/
public Object executeScript(final String script, final String uri, final int lineNumber) {
checkThread();
checkScript(script);
return executeScript(getV8RuntimePtr(), UNKNOWN, script, uri, lineNumber);
}
/**
* Executes a JS Script module on this runtime and returns the result as a Java Object.
* Primitives will be boxed.
*
* If the script does not match the signature (as verified with the public key) then a
* V8SecurityException will be thrown.
*
* @param script The signed script to execute
* @param modulePrefix The module prefix
* @param modulePostfix The module postfix
* @param uri The name of the script
*
* @return The result of the script as a Java Object.
*/
public Object executeModule(final String script, final String modulePrefix, final String modulePostfix, final String uri) {
checkThread();
checkScript(script);
return executeScript(getV8RuntimePtr(), UNKNOWN, modulePrefix + script + modulePostfix, uri, 0);
}
/**
* Executes a JS Script on this runtime and returns the result as a V8Object.
* If the result is not a V8Object, then a V8ResultUndefinedException is thrown.
*
* @param script The script to execute.
*
* @return The result of the script as a V8Object, or V8ResultUndefinedException if
* the result is not a V8Object.
*/
public V8Object executeObjectScript(final String script) {
return this.executeObjectScript(script, null, 0);
}
/**
* Executes a JS Script on this runtime and returns the result as a V8Object.
* If the result is not a V8Object, then a V8ResultUndefinedException is thrown.
*
* @param script The script to execute.
* @param scriptName The name of the script
* @param lineNumber The line number that is considered to be the first line of
* the script. Typically 0, but could be set to another value for exception stack trace purposes.
*
* @return The result of the script as a V8Object, or V8ResultUndefinedException if
* the result is not a V8Object.
*/
public V8Object executeObjectScript(final String script, final String scriptName, final int lineNumber) {
checkThread();
Object result = this.executeScript(script, scriptName, lineNumber);
if (result instanceof V8Object) {
return (V8Object) result;
}
throw new V8ResultUndefined();
}
/**
* Executes a JS Script on this runtime.
*
* @param script The script to execute.
*/
public void executeVoidScript(final String script) {
executeVoidScript(script, null, 0);
}
/**
* Executes a JS Script on this runtime.
*
* @param script The script to execute.
* @param scriptName The name of the script
* @param lineNumber The line number that is considered to be the first line of
* the script. Typically 0, but could be set to another value for exception stack trace purposes.
*/
public void executeVoidScript(final String script, final String scriptName, final int lineNumber) {
checkThread();
checkScript(script);
executeVoidScript(v8RuntimePtr, script, scriptName, lineNumber);
}
/**
* Returns the locker associated with this runtime. The locker allows
* threads to give up control of the runtime and other threads to acquire
* control.
*
* @return The locker associated with this runtime.
*/
public V8Locker getLocker() {
return locker;
}
/**
* Returns the unique build ID of the native library.
*
* @return The unique build ID of the Native library.
*/
public static long getBuildID() {
return _getBuildID();
}
/**
* Indicates to V8 that the system is low on memory.
* V8 may use this to attempt to recover space by running
* the garbage collector.
*/
public void lowMemoryNotification() {
checkThread();
lowMemoryNotification(getV8RuntimePtr());
}
void checkRuntime(final V8Value value) {
if ((value == null) || value.isUndefined()) {
return;
}
V8 runtime = value.getRuntime();
if ((runtime == null) ||
runtime.isReleased() ||
(runtime != this)) {
throw new Error("Invalid target runtime");
}
}
void checkThread() {
locker.checkThread();
if (isReleased()) {
throw new Error("Runtime disposed error");
}
}
static void checkScript(final String script) {
if (script == null) {
throw new NullPointerException("Script is null");
}
}
void registerCallback(final Object object, final Method method, final long objectHandle, final String jsFunctionName, final boolean includeReceiver) {
MethodDescriptor methodDescriptor = new MethodDescriptor();
methodDescriptor.object = object;
methodDescriptor.method = method;
methodDescriptor.includeReceiver = includeReceiver;
long methodID = registerJavaMethod(getV8RuntimePtr(), objectHandle, jsFunctionName, isVoidMethod(method));
functionRegistry.put(methodID, methodDescriptor);
}
void registerVoidCallback(final JavaVoidCallback callback, final long objectHandle, final String jsFunctionName) {
MethodDescriptor methodDescriptor = new MethodDescriptor();
methodDescriptor.voidCallback = callback;
long methodID = registerJavaMethod(getV8RuntimePtr(), objectHandle, jsFunctionName, true);
functionRegistry.put(methodID, methodDescriptor);
}
void registerCallback(final JavaCallback callback, final long objectHandle, final String jsFunctionName) {
long methodID = registerJavaMethod(getV8RuntimePtr(), objectHandle, jsFunctionName, false);
createAndRegisterMethodDescriptor(callback, methodID);
}
void createAndRegisterMethodDescriptor(final JavaCallback callback, final long methodID) {
MethodDescriptor methodDescriptor = new MethodDescriptor();
methodDescriptor.callback = callback;
functionRegistry.put(methodID, methodDescriptor);
}
private boolean isVoidMethod(final Method method) {
Class<?> returnType = method.getReturnType();
if (returnType.equals(Void.TYPE)) {
return true;
}
return false;
}
private Object getDefaultValue(final Class<?> type) {
if (type.equals(V8Object.class)) {
return new Undefined();
} else if (type.equals(V8Array.class)) {
return new V8Array.Undefined();
}
return invalid;
}
protected void disposeMethodID(final long methodID) {
functionRegistry.remove(methodID);
}
protected void weakReferenceReleased(final long objectID) {
V8Value v8Value = v8WeakReferences.get(objectID);
if (v8Value != null) {
v8WeakReferences.remove(objectID);
try {
v8Value.close();
} catch (Exception e) {
// Swallow these exceptions. The V8 GC is running, and
// if we return to V8 with Java exception on our stack,
// we will be in a world of hurt.
}
}
}
protected Object callObjectJavaMethod(final long methodID, final V8Object receiver, final V8Array parameters) throws Throwable {
MethodDescriptor methodDescriptor = functionRegistry.get(methodID);
if (methodDescriptor.callback != null) {
return checkResult(methodDescriptor.callback.invoke(receiver, parameters));
}
boolean hasVarArgs = methodDescriptor.method.isVarArgs();
Object[] args = getArgs(receiver, methodDescriptor, parameters, hasVarArgs);
checkArgs(args);
try {
Object result = methodDescriptor.method.invoke(methodDescriptor.object, args);
return checkResult(result);
} catch (InvocationTargetException e) {
throw e.getTargetException();
} catch (IllegalAccessException e) {
throw e;
} catch (IllegalArgumentException e) {
throw e;
} finally {
releaseArguments(args, hasVarArgs);
}
}
private Object checkResult(final Object result) {
if (result == null) {
return result;
}
if (result instanceof Float) {
return ((Float) result).doubleValue();
}
if ((result instanceof Integer) || (result instanceof Double) || (result instanceof Boolean)
|| (result instanceof String)) {
return result;
}
if (result instanceof V8Value) {
if (((V8Value) result).isReleased()) {
throw new V8RuntimeException("V8Value already released");
}
return result;
}
throw new V8RuntimeException("Unknown return type: " + result.getClass());
}
protected void callVoidJavaMethod(final long methodID, final V8Object receiver, final V8Array parameters) throws Throwable {
MethodDescriptor methodDescriptor = functionRegistry.get(methodID);
if (methodDescriptor.voidCallback != null) {
methodDescriptor.voidCallback.invoke(receiver, parameters);
return;
}
boolean hasVarArgs = methodDescriptor.method.isVarArgs();
Object[] args = getArgs(receiver, methodDescriptor, parameters, hasVarArgs);
checkArgs(args);
try {
methodDescriptor.method.invoke(methodDescriptor.object, args);
} catch (InvocationTargetException e) {
throw e.getTargetException();
} catch (IllegalAccessException e) {
throw e;
} catch (IllegalArgumentException e) {
throw e;
} finally {
releaseArguments(args, hasVarArgs);
}
}
private void checkArgs(final Object[] args) {
for (Object argument : args) {
if (argument == invalid) {
throw new IllegalArgumentException("argument type mismatch");
}
}
}
private void releaseArguments(final Object[] args, final boolean hasVarArgs) {
if (hasVarArgs && ((args.length > 0) && (args[args.length - 1] instanceof Object[]))) {
Object[] varArgs = (Object[]) args[args.length - 1];
for (Object object : varArgs) {
if (object instanceof V8Value) {
((V8Value) object).close();
}
}
}
for (Object arg : args) {
if (arg instanceof V8Value) {
((V8Value) arg).close();
}
}
}
private Object[] getArgs(final V8Object receiver, final MethodDescriptor methodDescriptor, final V8Array parameters, final boolean hasVarArgs) {
int numberOfParameters = methodDescriptor.method.getParameterTypes().length;
int varArgIndex = hasVarArgs ? numberOfParameters - 1 : numberOfParameters;
Object[] args = setDefaultValues(new Object[numberOfParameters], methodDescriptor.method.getParameterTypes(), receiver, methodDescriptor.includeReceiver);
List<Object> varArgs = new ArrayList<Object>();
populateParameters(parameters, varArgIndex, args, varArgs, methodDescriptor.includeReceiver);
if (hasVarArgs) {
Object varArgContainer = getVarArgContainer(methodDescriptor.method.getParameterTypes(), varArgs.size());
System.arraycopy(varArgs.toArray(), 0, varArgContainer, 0, varArgs.size());
args[varArgIndex] = varArgContainer;
}
return args;
}
private Object getVarArgContainer(final Class<?>[] parameterTypes, final int size) {
Class<?> clazz = parameterTypes[parameterTypes.length - 1];
if (clazz.isArray()) {
clazz = clazz.getComponentType();
}
Object result = java.lang.reflect.Array.newInstance(clazz, size);
return result;
}
private void populateParameters(final V8Array parameters, final int varArgIndex, final Object[] args, final List<Object> varArgs, final boolean includeReceiver) {
int start = 0;
if (includeReceiver) {
start = 1;
}
for (int i = start; i < (parameters.length() + start); i++) {
if (i >= varArgIndex) {
varArgs.add(getArrayItem(parameters, i - start));
} else {
args[i] = getArrayItem(parameters, i - start);
}
}
}
private Object[] setDefaultValues(final Object[] parameters, final Class<?>[] parameterTypes, final V8Object receiver, final boolean includeReceiver) {
int start = 0;
if (includeReceiver) {
start = 1;
parameters[0] = receiver;
}
for (int i = start; i < parameters.length; i++) {
parameters[i] = getDefaultValue(parameterTypes[i]);
}
return parameters;
}
private Object getArrayItem(final V8Array array, final int index) {
try {
int type = array.getType(index);
switch (type) {
case INTEGER:
return array.getInteger(index);
case DOUBLE:
return array.getDouble(index);
case BOOLEAN:
return array.getBoolean(index);
case STRING:
return array.getString(index);
case V8_ARRAY:
case V8_TYPED_ARRAY:
return array.getArray(index);
case V8_OBJECT:
return array.getObject(index);
case V8_FUNCTION:
return array.getObject(index);
case V8_ARRAY_BUFFER:
return array.get(index);
case UNDEFINED:
return V8.getUndefined();
}
} catch (V8ResultUndefined e) {
// do nothing
}
return null;
}
void createNodeRuntime(final String fileName) {
_startNodeJS(v8RuntimePtr, fileName);
}
boolean pumpMessageLoop() {
return _pumpMessageLoop(v8RuntimePtr);
}
boolean isRunning() {
return _isRunning(v8RuntimePtr);
}
protected long initNewV8Object(final long v8RuntimePtr) {
return _initNewV8Object(v8RuntimePtr);
}
protected long initEmptyContainer(final long v8RuntimePtr) {
return _initEmptyContainer(v8RuntimePtr);
}
protected void acquireLock(final long v8RuntimePtr) {
_acquireLock(v8RuntimePtr);
}
protected void releaseLock(final long v8RuntimePtr) {
_releaseLock(v8RuntimePtr);
}
protected void lowMemoryNotification(final long v8RuntimePtr) {
_lowMemoryNotification(v8RuntimePtr);
}
protected void createTwin(final long v8RuntimePtr, final long objectHandle, final long twinHandle) {
_createTwin(v8RuntimePtr, objectHandle, twinHandle);
}
protected int executeIntegerScript(final long v8RuntimePtr, final String script, final String scriptName, final int lineNumber) {
return _executeIntegerScript(v8RuntimePtr, script, scriptName, lineNumber);
}
protected double executeDoubleScript(final long v8RuntimePtr, final String script, final String scriptName, final int lineNumber) {
return _executeDoubleScript(v8RuntimePtr, script, scriptName, lineNumber);
}
protected String executeStringScript(final long v8RuntimePtr, final String script, final String scriptName, final int lineNumber) {
return _executeStringScript(v8RuntimePtr, script, scriptName, lineNumber);
}
protected boolean executeBooleanScript(final long v8RuntimePtr, final String script, final String scriptName, final int lineNumber) {
return _executeBooleanScript(v8RuntimePtr, script, scriptName, lineNumber);
}
protected Object executeScript(final long v8RuntimePtr, final int expectedType, final String script, final String scriptName, final int lineNumber) {
return _executeScript(v8RuntimePtr, expectedType, script, scriptName, lineNumber);
}
protected void executeVoidScript(final long v8RuntimePtr, final String script, final String scriptName, final int lineNumber) {
_executeVoidScript(v8RuntimePtr, script, scriptName, lineNumber);
}
protected void setWeak(final long v8RuntimePtr, final long objectHandle) {
_setWeak(v8RuntimePtr, objectHandle);
}
protected void clearWeak(final long v8RuntimePtr, final long objectHandle) {
_clearWeak(v8RuntimePtr, objectHandle);
}
protected boolean isWeak(final long v8RuntimePtr, final long objectHandle) {
return _isWeak(v8RuntimePtr, objectHandle);
}
protected void release(final long v8RuntimePtr, final long objectHandle) {
_release(v8RuntimePtr, objectHandle);
}
protected boolean contains(final long v8RuntimePtr, final long objectHandle, final String key) {
return _contains(v8RuntimePtr, objectHandle, key);
}
protected String[] getKeys(final long v8RuntimePtr, final long objectHandle) {
return _getKeys(v8RuntimePtr, objectHandle);
}
protected int getInteger(final long v8RuntimePtr, final long objectHandle, final String key) {
return _getInteger(v8RuntimePtr, objectHandle, key);
}
protected boolean getBoolean(final long v8RuntimePtr, final long objectHandle, final String key) {
return _getBoolean(v8RuntimePtr, objectHandle, key);
}
protected double getDouble(final long v8RuntimePtr, final long objectHandle, final String key) {
return _getDouble(v8RuntimePtr, objectHandle, key);
}
protected String getString(final long v8RuntimePtr, final long objectHandle, final String key) {
return _getString(v8RuntimePtr, objectHandle, key);
}
protected Object get(final long v8RuntimePtr, final int expectedType, final long objectHandle, final String key) {
return _get(v8RuntimePtr, expectedType, objectHandle, key);
}
protected int executeIntegerFunction(final long v8RuntimePtr, final long objectHandle, final String name, final long parametersHandle) {
return _executeIntegerFunction(v8RuntimePtr, objectHandle, name, parametersHandle);
}
protected double executeDoubleFunction(final long v8RuntimePtr, final long objectHandle, final String name, final long parametersHandle) {
return _executeDoubleFunction(v8RuntimePtr, objectHandle, name, parametersHandle);
}
protected String executeStringFunction(final long v8RuntimePtr, final long handle, final String name, final long parametersHandle) {
return _executeStringFunction(v8RuntimePtr, handle, name, parametersHandle);
}
protected boolean executeBooleanFunction(final long v8RuntimePtr, final long handle, final String name, final long parametersHandle) {
return _executeBooleanFunction(v8RuntimePtr, handle, name, parametersHandle);
}
protected Object executeFunction(final long v8RuntimePtr, final int expectedType, final long objectHandle, final String name, final long parametersHandle) {
return _executeFunction(v8RuntimePtr, expectedType, objectHandle, name, parametersHandle);
}
protected Object executeFunction(final long v8RuntimePtr, final long receiverHandle, final long functionHandle, final long parametersHandle) {
return _executeFunction(v8RuntimePtr, receiverHandle, functionHandle, parametersHandle);
}
protected void executeVoidFunction(final long v8RuntimePtr, final long objectHandle, final String name, final long parametersHandle) {
_executeVoidFunction(v8RuntimePtr, objectHandle, name, parametersHandle);
}
protected boolean equals(final long v8RuntimePtr, final long objectHandle, final long that) {
return _equals(v8RuntimePtr, objectHandle, that);
}
protected String toString(final long v8RuntimePtr, final long objectHandle) {
return _toString(v8RuntimePtr, objectHandle);
}
protected boolean strictEquals(final long v8RuntimePtr, final long objectHandle, final long that) {
return _strictEquals(v8RuntimePtr, objectHandle, that);
}
protected boolean sameValue(final long v8RuntimePtr, final long objectHandle, final long that) {
return _sameValue(v8RuntimePtr, objectHandle, that);
}
protected int identityHash(final long v8RuntimePtr, final long objectHandle) {
return _identityHash(v8RuntimePtr, objectHandle);
}
protected void add(final long v8RuntimePtr, final long objectHandle, final String key, final int value) {
_add(v8RuntimePtr, objectHandle, key, value);
}
protected void addObject(final long v8RuntimePtr, final long objectHandle, final String key, final long value) {
_addObject(v8RuntimePtr, objectHandle, key, value);
}
protected void add(final long v8RuntimePtr, final long objectHandle, final String key, final boolean value) {
_add(v8RuntimePtr, objectHandle, key, value);
}
protected void add(final long v8RuntimePtr, final long objectHandle, final String key, final double value) {
_add(v8RuntimePtr, objectHandle, key, value);
}
protected void add(final long v8RuntimePtr, final long objectHandle, final String key, final String value) {
_add(v8RuntimePtr, objectHandle, key, value);
}
protected void addUndefined(final long v8RuntimePtr, final long objectHandle, final String key) {
_addUndefined(v8RuntimePtr, objectHandle, key);
}
protected void addNull(final long v8RuntimePtr, final long objectHandle, final String key) {
_addNull(v8RuntimePtr, objectHandle, key);
}
protected long registerJavaMethod(final long v8RuntimePtr, final long objectHandle, final String functionName, final boolean voidMethod) {
return _registerJavaMethod(v8RuntimePtr, objectHandle, functionName, voidMethod);
}
protected long initNewV8ArrayBuffer(final long v8RuntimePtr, final ByteBuffer buffer, final int capacity) {
return _initNewV8ArrayBuffer(v8RuntimePtr, buffer, capacity);
}
protected long initNewV8ArrayBuffer(final long v8RuntimePtr, final int capacity) {
return _initNewV8ArrayBuffer(v8RuntimePtr, capacity);
}
public long initNewV8Int32Array(final long runtimePtr, final long bufferHandle, final int offset, final int size) {
return _initNewV8Int32Array(runtimePtr, bufferHandle, offset, size);
}
public long initNewV8Float32Array(final long runtimePtr, final long bufferHandle, final int offset, final int size) {
return _initNewV8Float32Array(runtimePtr, bufferHandle, offset, size);
}
public long initNewV8Float64Array(final long runtimePtr, final long bufferHandle, final int offset, final int size) {
return _initNewV8Float64Array(runtimePtr, bufferHandle, offset, size);
}
public long initNewV8UInt32Array(final long runtimePtr, final long bufferHandle, final int offset, final int size) {
return _initNewV8UInt32Array(runtimePtr, bufferHandle, offset, size);
}
public long initNewV8UInt16Array(final long runtimePtr, final long bufferHandle, final int offset, final int size) {
return _initNewV8UInt16Array(runtimePtr, bufferHandle, offset, size);
}
public long initNewV8Int16Array(final long runtimePtr, final long bufferHandle, final int offset, final int size) {
return _initNewV8Int16Array(runtimePtr, bufferHandle, offset, size);
}
public long initNewV8UInt8Array(final long runtimePtr, final long bufferHandle, final int offset, final int size) {
return _initNewV8UInt8Array(runtimePtr, bufferHandle, offset, size);
}
public long initNewV8Int8Array(final long runtimePtr, final long bufferHandle, final int offset, final int size) {
return _initNewV8Int8Array(runtimePtr, bufferHandle, offset, size);
}
public long initNewV8UInt8ClampedArray(final long runtimePtr, final long bufferHandle, final int offset, final int size) {
return _initNewV8UInt8ClampedArray(runtimePtr, bufferHandle, offset, size);
}
protected ByteBuffer createV8ArrayBufferBackingStore(final long v8RuntimePtr, final long objectHandle, final int capacity) {
return _createV8ArrayBufferBackingStore(v8RuntimePtr, objectHandle, capacity);
}
protected long initNewV8Array(final long v8RuntimePtr) {
return _initNewV8Array(v8RuntimePtr);
}
protected long[] initNewV8Function(final long v8RuntimePtr) {
checkThread();
return _initNewV8Function(v8RuntimePtr);
}
protected int arrayGetSize(final long v8RuntimePtr, final long arrayHandle) {
return _arrayGetSize(v8RuntimePtr, arrayHandle);
}
protected int arrayGetInteger(final long v8RuntimePtr, final long arrayHandle, final int index) {
return _arrayGetInteger(v8RuntimePtr, arrayHandle, index);
}
protected boolean arrayGetBoolean(final long v8RuntimePtr, final long arrayHandle, final int index) {
return _arrayGetBoolean(v8RuntimePtr, arrayHandle, index);
}
protected byte arrayGetByte(final long v8RuntimePtr, final long arrayHandle, final int index) {
return _arrayGetByte(v8RuntimePtr, arrayHandle, index);
}
protected double arrayGetDouble(final long v8RuntimePtr, final long arrayHandle, final int index) {
return _arrayGetDouble(v8RuntimePtr, arrayHandle, index);
}
protected String arrayGetString(final long v8RuntimePtr, final long arrayHandle, final int index) {
return _arrayGetString(v8RuntimePtr, arrayHandle, index);
}
protected Object arrayGet(final long v8RuntimePtr, final int expectedType, final long arrayHandle, final int index) {
return _arrayGet(v8RuntimePtr, expectedType, arrayHandle, index);
}
protected void addArrayIntItem(final long v8RuntimePtr, final long arrayHandle, final int value) {
_addArrayIntItem(v8RuntimePtr, arrayHandle, value);
}
protected void addArrayBooleanItem(final long v8RuntimePtr, final long arrayHandle, final boolean value) {
_addArrayBooleanItem(v8RuntimePtr, arrayHandle, value);
}
protected void addArrayDoubleItem(final long v8RuntimePtr, final long arrayHandle, final double value) {
_addArrayDoubleItem(v8RuntimePtr, arrayHandle, value);
}
protected void addArrayStringItem(final long v8RuntimePtr, final long arrayHandle, final String value) {
_addArrayStringItem(v8RuntimePtr, arrayHandle, value);
}
protected void addArrayObjectItem(final long v8RuntimePtr, final long arrayHandle, final long value) {
_addArrayObjectItem(v8RuntimePtr, arrayHandle, value);
}
protected void addArrayUndefinedItem(final long v8RuntimePtr, final long arrayHandle) {
_addArrayUndefinedItem(v8RuntimePtr, arrayHandle);
}
protected void addArrayNullItem(final long v8RuntimePtr, final long arrayHandle) {
_addArrayNullItem(v8RuntimePtr, arrayHandle);
}
protected String getConstructorName(final long v8RuntimePtr, final long objectHandle) {
return _getConstructorName(v8RuntimePtr, objectHandle);
}
protected int getType(final long v8RuntimePtr, final long objectHandle) {
return _getType(v8RuntimePtr, objectHandle);
}
protected int getType(final long v8RuntimePtr, final long objectHandle, final String key) {
return _getType(v8RuntimePtr, objectHandle, key);
}
protected int getType(final long v8RuntimePtr, final long objectHandle, final int index) {
return _getType(v8RuntimePtr, objectHandle, index);
}
protected int getArrayType(final long v8RuntimePtr, final long objectHandle) {
return _getArrayType(v8RuntimePtr, objectHandle);
}
protected int getType(final long v8RuntimePtr, final long objectHandle, final int index, final int length) {
return _getType(v8RuntimePtr, objectHandle, index, length);
}
protected void setPrototype(final long v8RuntimePtr, final long objectHandle, final long prototypeHandle) {
_setPrototype(v8RuntimePtr, objectHandle, prototypeHandle);
}
protected int[] arrayGetIntegers(final long v8RuntimePtr, final long objectHandle, final int index, final int length) {
return _arrayGetIntegers(v8RuntimePtr, objectHandle, index, length);
}
protected double[] arrayGetDoubles(final long v8RuntimePtr, final long objectHandle, final int index, final int length) {
return _arrayGetDoubles(v8RuntimePtr, objectHandle, index, length);
}
protected boolean[] arrayGetBooleans(final long v8RuntimePtr, final long objectHandle, final int index, final int length) {
return _arrayGetBooleans(v8RuntimePtr, objectHandle, index, length);
}
protected byte[] arrayGetBytes(final long v8RuntimePtr, final long objectHandle, final int index, final int length) {
return _arrayGetBytes(v8RuntimePtr, objectHandle, index, length);
}
protected String[] arrayGetStrings(final long v8RuntimePtr, final long objectHandle, final int index, final int length) {
return _arrayGetStrings(v8RuntimePtr, objectHandle, index, length);
}
protected int arrayGetIntegers(final long v8RuntimePtr, final long objectHandle, final int index, final int length, final int[] resultArray) {
return _arrayGetIntegers(v8RuntimePtr, objectHandle, index, length, resultArray);
}
protected int arrayGetDoubles(final long v8RuntimePtr, final long objectHandle, final int index, final int length, final double[] resultArray) {
return _arrayGetDoubles(v8RuntimePtr, objectHandle, index, length, resultArray);
}
protected int arrayGetBooleans(final long v8RuntimePtr, final long objectHandle, final int index, final int length, final boolean[] resultArray) {
return _arrayGetBooleans(v8RuntimePtr, objectHandle, index, length, resultArray);
}
protected int arrayGetBytes(final long v8RuntimePtr, final long objectHandle, final int index, final int length, final byte[] resultArray) {
return _arrayGetBytes(v8RuntimePtr, objectHandle, index, length, resultArray);
}
protected int arrayGetStrings(final long v8RuntimePtr, final long objectHandle, final int index, final int length, final String[] resultArray) {
return _arrayGetStrings(v8RuntimePtr, objectHandle, index, length, resultArray);
}
protected void terminateExecution(final long v8RuntimePtr) {
_terminateExecution(v8RuntimePtr);
}
protected void releaseMethodDescriptor(final long v8RuntimePtr, final long methodDescriptor) {
_releaseMethodDescriptor(v8RuntimePtr, methodDescriptor);
}
private native long _initNewV8Object(long v8RuntimePtr);
private native long _initEmptyContainer(long v8RuntimePtr);
private native void _acquireLock(long v8RuntimePtr);
private native void _releaseLock(long v8RuntimePtr);
private native void _lowMemoryNotification(long v8RuntimePtr);
private native void _createTwin(long v8RuntimePtr, long objectHandle, long twinHandle);
private native void _releaseRuntime(long v8RuntimePtr);
private native long _createIsolate(String globalAlias);
private native long _createInspector(long v8RuntimePtr, final V8InspectorDelegate inspectorDelegate, final String contextName);
private native void _dispatchProtocolMessage(final long v8RuntimePtr, long v8InspectorPtr, final String protocolMessage);
private native void _schedulePauseOnNextStatement(final long v8RuntimePtr, long v8InspectorPtr, final String reason);
private native int _executeIntegerScript(long v8RuntimePtr, final String script, final String scriptName, final int lineNumber);
private native double _executeDoubleScript(long v8RuntimePtr, final String script, final String scriptName, final int lineNumber);
private native String _executeStringScript(long v8RuntimePtr, final String script, final String scriptName, final int lineNumber);
private native boolean _executeBooleanScript(long v8RuntimePtr, final String script, final String scriptName, final int lineNumber);
private native Object _executeScript(long v8RuntimePtr, int expectedType, String script, String scriptName, int lineNumber);
private native void _executeVoidScript(long v8RuntimePtr, String script, String scriptName, int lineNumber);
private native void _release(long v8RuntimePtr, long objectHandle);
private native void _releaseMethodDescriptor(long v8RuntimePtr, long methodDescriptor);
private native boolean _contains(long v8RuntimePtr, long objectHandle, final String key);
private native String[] _getKeys(long v8RuntimePtr, long objectHandle);
private native int _getInteger(long v8RuntimePtr, long objectHandle, final String key);
private native boolean _getBoolean(long v8RuntimePtr, long objectHandle, final String key);
private native double _getDouble(long v8RuntimePtr, long objectHandle, final String key);
private native String _getString(long v8RuntimePtr, long objectHandle, final String key);
private native Object _get(long v8RuntimePtr, int expectedType, long objectHandle, final String key);
private native int _executeIntegerFunction(long v8RuntimePtr, long objectHandle, String name, long parametersHandle);
private native double _executeDoubleFunction(long v8RuntimePtr, long objectHandle, String name, long parametersHandle);
private native String _executeStringFunction(long v8RuntimePtr2, long handle, String name, long parametersHandle);
private native boolean _executeBooleanFunction(long v8RuntimePtr2, long handle, String name, long parametersHandle);
private native Object _executeFunction(long v8RuntimePtr, int expectedType, long objectHandle, String name, long parametersHandle);
private native Object _executeFunction(long v8RuntimePtr, long receiverHandle, long functionHandle, long parametersHandle);
private native void _executeVoidFunction(long v8RuntimePtr, long objectHandle, final String name, final long parametersHandle);
private native boolean _equals(long v8RuntimePtr, long objectHandle, long that);
private native String _toString(long v8RuntimePtr, long ObjectHandle);
private native boolean _strictEquals(long v8RuntimePtr, long objectHandle, long that);
private native boolean _sameValue(long v8RuntimePtr, long objectHandle, long that);
private native int _identityHash(long v8RuntimePtr, long objectHandle);
private native void _add(long v8RuntimePtr, long objectHandle, final String key, final int value);
private native void _addObject(long v8RuntimePtr, long objectHandle, final String key, final long value);
private native void _add(long v8RuntimePtr, long objectHandle, final String key, final boolean value);
private native void _add(long v8RuntimePtr, long objectHandle, final String key, final double value);
private native void _add(long v8RuntimePtr, long objectHandle, final String key, final String value);
private native void _addUndefined(long v8RuntimePtr, long objectHandle, final String key);
private native void _addNull(long v8RuntimePtr, long objectHandle, final String key);
private native long _registerJavaMethod(long v8RuntimePtr, long objectHandle, final String functionName, final boolean voidMethod);
private native long _initNewV8Array(long v8RuntimePtr);
private native long[] _initNewV8Function(long v8RuntimePtr);
private native int _arrayGetSize(long v8RuntimePtr, long arrayHandle);
private native int _arrayGetInteger(long v8RuntimePtr, long arrayHandle, int index);
private native boolean _arrayGetBoolean(long v8RuntimePtr, long arrayHandle, int index);
private native byte _arrayGetByte(long v8RuntimePtr, long arrayHandle, int index);
private native double _arrayGetDouble(long v8RuntimePtr, long arrayHandle, int index);
private native String _arrayGetString(long v8RuntimePtr, long arrayHandle, int index);
private native Object _arrayGet(long v8RuntimePtr, int expectedType, long arrayHandle, int index);
private native void _addArrayIntItem(long v8RuntimePtr, long arrayHandle, int value);
private native void _addArrayBooleanItem(long v8RuntimePtr, long arrayHandle, boolean value);
private native void _addArrayDoubleItem(long v8RuntimePtr, long arrayHandle, double value);
private native void _addArrayStringItem(long v8RuntimePtr, long arrayHandle, String value);
private native void _addArrayObjectItem(long v8RuntimePtr, long arrayHandle, long value);
private native void _addArrayUndefinedItem(long v8RuntimePtr, long arrayHandle);
private native void _addArrayNullItem(long v8RuntimePtr, long arrayHandle);
private native int _getType(long v8RuntimePtr, long objectHandle, final String key);
private native int _getType(long v8RuntimePtr, long objectHandle, final int index);
private native int _getArrayType(long v8RuntimePtr, long objectHandle);
private native void _setPrototype(long v8RuntimePtr, long objectHandle, long prototypeHandle);
private native String _getConstructorName(long v8RuntimePtr, long objectHandle);
private native int _getType(long v8RuntimePtr, long objectHandle);
private native int _getType(long v8RuntimePtr, long objectHandle, final int index, final int length);
private native double[] _arrayGetDoubles(final long v8RuntimePtr, final long objectHandle, final int index, final int length);
private native int[] _arrayGetIntegers(final long v8RuntimePtr, final long objectHandle, final int index, final int length);
private native boolean[] _arrayGetBooleans(final long v8RuntimePtr, final long objectHandle, final int index, final int length);
private native byte[] _arrayGetBytes(final long v8RuntimePtr, final long objectHandle, final int index, final int length);
private native String[] _arrayGetStrings(final long v8RuntimePtr, final long objectHandle, final int index, final int length);
private native int _arrayGetIntegers(final long v8RuntimePtr, final long objectHandle, final int index, final int length, int[] resultArray);
private native int _arrayGetDoubles(final long v8RuntimePtr, final long objectHandle, final int index, final int length, double[] resultArray);
private native int _arrayGetBooleans(final long v8RuntimePtr, final long objectHandle, final int index, final int length, boolean[] resultArray);
private native int _arrayGetBytes(final long v8RuntimePtr, final long objectHandle, final int index, final int length, byte[] resultArray);
private native int _arrayGetStrings(final long v8RuntimePtr, final long objectHandle, final int index, final int length, String[] resultArray);
private native long _initNewV8ArrayBuffer(long v8RuntimePtr, int capacity);
private native long _initNewV8ArrayBuffer(long v8RuntimePtr, ByteBuffer buffer, int capacity);
private native long _initNewV8Int32Array(long runtimePtr, long bufferHandle, int offset, int size);
private native long _initNewV8UInt32Array(long runtimePtr, long bufferHandle, int offset, int size);
private native long _initNewV8Float32Array(long runtimePtr, long bufferHandle, int offset, int size);
private native long _initNewV8Float64Array(long runtimePtr, long bufferHandle, int offset, int size);
private native long _initNewV8Int16Array(long runtimePtr, long bufferHandle, int offset, int size);
private native long _initNewV8UInt16Array(long runtimePtr, long bufferHandle, int offset, int size);
private native long _initNewV8Int8Array(long runtimePtr, long bufferHandle, int offset, int size);
private native long _initNewV8UInt8Array(long runtimePtr, long bufferHandle, int offset, int size);
private native long _initNewV8UInt8ClampedArray(long runtimePtr, long bufferHandle, int offset, int size);
private native void _setWeak(long runtimePtr, long objectHandle);
private native void _clearWeak(long runtimePtr, long objectHandle);
private native boolean _isWeak(long runtimePtr, long objectHandle);
private native ByteBuffer _createV8ArrayBufferBackingStore(final long v8RuntimePtr, final long objectHandle, final int capacity);
private native static String _getVersion();
private static native void _setFlags(String v8flags);
private native void _terminateExecution(final long v8RuntimePtr);
private native long _getGlobalObject(final long v8RuntimePtr);
private native static long _getBuildID();
private native static void _startNodeJS(final long v8RuntimePtr, final String fileName);
private native static boolean _pumpMessageLoop(final long v8RuntimePtr);
private native static boolean _isRunning(final long v8RuntimePtr);
private native static boolean _isNodeCompatible();
public static boolean isNodeCompatible() {
if (!nativeLibraryLoaded) {
synchronized (lock) {
if (!nativeLibraryLoaded) {
load(null);
}
}
}
return _isNodeCompatible();
}
void addObjRef(final V8Value reference) {
objectReferences++;
if (!referenceHandlers.isEmpty()) {
notifyReferenceCreated(reference);
}
}
void releaseObjRef(final V8Value reference) {
if (!referenceHandlers.isEmpty()) {
notifyReferenceDisposed(reference);
}
objectReferences--;
}
} |
package org.usfirst.frc190.CurrentRobotProject;
//This is an "enum-type" class that allows you to specify how the robot should move in order to collect the ball.
//Values 0-9 are reserved for passing control commands to the Arduino.
//Values 10-40 are reserved for pressure sensing (value 10 = no lights, value 40 = all lights on the strip on).
//Values 41+ are reserved for different LED light strip patterns.
public class ArduinoMessages {
public final byte value;
protected static final byte DISABLED_VAL = 0;
protected static final byte ENABLED_VAL = 1;
protected static final byte COLLECTING_VAL = 2;
protected static final byte LOWERING_PIVOT_VAL = 3;
protected static final byte RAISING_PIVOT_VAL = 4;
protected static final byte ROLLERS_IN_VAL = 5;
protected static final byte ROLLERS_OUT_VAL = 6;
protected static final byte FIRE_VAL = 7;
protected static final byte BOOM_TRIPLE_THREAT_VAL = 8;
protected static final byte MOVING_TO_STORED_VAL = 9;
protected static final byte PREPARING_TO_SCORE_LOW_VAL = 10;
protected static final byte PREPARING_FOR_TRUSS_VAL = 11;
protected static final byte PREPARING_FOR_FAR_VAL = 12;
protected static final byte CLOSING_CLAW_VAL = 13;
protected static final byte OPENING_CLAW_VAL = 14;
public static final ArduinoMessages DISABLED = new ArduinoMessages(DISABLED_VAL);
public static final ArduinoMessages ENABLED = new ArduinoMessages(ENABLED_VAL);
public static final ArduinoMessages COLLECTING = new ArduinoMessages(COLLECTING_VAL);
public static final ArduinoMessages LOWERING_PIVOT = new ArduinoMessages(LOWERING_PIVOT_VAL);
public static final ArduinoMessages RAISING_PIVOT = new ArduinoMessages(RAISING_PIVOT_VAL);
public static final ArduinoMessages ROLLERS_IN = new ArduinoMessages(ROLLERS_IN_VAL);
public static final ArduinoMessages ROLLERS_OUT = new ArduinoMessages(ROLLERS_OUT_VAL);
public static final ArduinoMessages FIRE = new ArduinoMessages(FIRE_VAL);
public static final ArduinoMessages BOOM_TRIPLE_THREAT = new ArduinoMessages(BOOM_TRIPLE_THREAT_VAL);
public static final ArduinoMessages MOVING_TO_STORED = new ArduinoMessages(MOVING_TO_STORED_VAL);
public static final ArduinoMessages PREPARING_TO_SCORE_LOW = new ArduinoMessages(PREPARING_TO_SCORE_LOW_VAL);
public static final ArduinoMessages PREPARING_FOR_TRUSS = new ArduinoMessages(PREPARING_FOR_TRUSS_VAL);
public static final ArduinoMessages PREPARING_FOR_FAR = new ArduinoMessages(PREPARING_FOR_FAR_VAL);
public static final ArduinoMessages CLOSING_CLAW = new ArduinoMessages(CLOSING_CLAW_VAL);
public static final ArduinoMessages OPENING_CLAW = new ArduinoMessages(OPENING_CLAW_VAL);
private ArduinoMessages(byte value){
this.value = value;
}
}
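// Usage sketch (editorial illustration, not part of the original robot code): only the raw
// byte is ever transmitted, so a caller would pass the value field to whatever link talks
// to the Arduino, e.g.
//
//     arduinoLink.write(ArduinoMessages.COLLECTING.value);   // arduinoLink is hypothetical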
|
Op-Ed by Michael Snyder
A shocking new survey has found that support for capitalism is dying in America. In fact, more than half of all adults in the United States under the age of 30 say that they do not support capitalism at this point. You might be tempted to dismiss them as “foolish young people”, but the truth is that they are the future of America. As older generations die off, they will eventually become the leaders of this country. And of course our nation has not resembled anything close to a capitalist society for quite some time now. In a recent article, I listed 97 different taxes that Americans pay each year, and some Americans actually end up returning more than half of what they earn to the government by the time it is all said and done. So at best it could be said that we are running some sort of hybrid system that isn’t as far down the road toward full-blown socialism as most European nations are. But without a doubt we are moving in that direction, and our young people are going to be cheering every step of the way.
When I first heard of this new survey from Harvard University, I was absolutely stunned. The following is from what the Washington Post had to say about it…
The Harvard University survey, which polled young adults between ages 18 and 29, found that 51 percent of respondents do not support capitalism. Just 42 percent said they support it. It isn’t clear that the young people in the poll would prefer some alternative system, though. Just 33 percent said they supported socialism. The survey had a margin of error of 2.4 percentage points.
Could it be possible that young adults were confused by the wording of the survey?
Well, other polls have come up with similar results…
The university’s results echo recent findings from Republican pollster Frank Luntz, who surveyed 1,000 Americans between the ages of 18 and 26 and found that 58% of respondents believed socialism to be the “compassionate” political system. And when participants were asked to sum up the root of America’s problem in one word, 29% said “greed.”
This trend among our young people is very real, and you can see it in their support of Bernie Sanders. For millions upon millions of young adults in America today, Hillary Clinton is not nearly liberal enough for them. So they have flocked to Sanders, and if they had been the only ones voting in this election season, he would have won the Democratic nomination by a landslide.
Sadly, most of our young people don’t seem to understand how socialism slowly but surely destroys a nation. If you want to see the end result of socialism, just look at the economic collapse that is going on in Venezuela right now. The following comes from a Bloomberg article entitled “Venezuela Doesn’t Have Enough Money to Pay for Its Money”…
Venezuela’s epic shortages are nothing new at this point. No diapers or car parts or aspirin — it’s all been well documented. But now the country is at risk of running out of money itself. In a tale that highlights the chaos of unbridled inflation, Venezuela is scrambling to print new bills fast enough to keep up with the torrid pace of price increases. Most of the cash, like nearly everything else in the oil-exporting country, is imported. And with hard currency reserves sinking to critically low levels, the central bank is doling out payments so slowly to foreign providers that they are foregoing further business. Venezuela, in other words, is now so broke that it may not have enough money to pay for its money.
We are losing an entire generation of young people. These days, there is quite a lot of talk about how we need to get America back to the principles that it was founded upon; but the cold, hard reality of the matter is that most of our young people are running in the opposite direction as fast as they can.
And Americans under the age of 30 are not just becoming more liberal when it comes to economics. Surveys have found that they are more than twice as likely to support gay rights and less than half as likely to regularly attend church as the oldest Americans are.
So why is this happening?
Well, the truth is that our colleges and universities have become indoctrination centers for the progressive movement. I know, because I spent eight years at public universities in this country. The quality of the education that our young people are receiving is abysmal, but the values that are being imparted to them will last a lifetime.
And of course the same things could be said about our system of education all the way down to the kindergarten level. There are still some good people in the system, but overall it is overwhelmingly dominated by the progressives.
Meanwhile, the major entertainment providers in the United States are also promoting the same values. In a recent article entitled “Depressing Survey Results Show How Extremely Stupid America Has Become”, I discussed a Nielsen report which detailed how much time the average American spends consuming media on various electronic devices each day…
Watching live television: 4 hours, 32 minutes
Watching time-shifted television: 30 minutes
Listening to the radio: 2 hours, 44 minutes
Using a smartphone: 1 hour, 33 minutes
Using Internet on a computer: 1 hour, 6 minutes
Overall, the average American spends about 10 hours a day consuming one form of entertainment or another.
When you allow that much “programming” into your mind, it is inevitable that it is going to shape your values, and our young people are more “plugged in” than any of the rest of us.
So, yes, I believe that it is exceedingly clear why we should be deeply concerned about the future of America. The values that are being relentlessly pounded into the heads of our young people are directly opposed to the values that this nation was founded upon, and it is these young people that will determine the path that this country ultimately takes.
*About the author: Michael Snyder is the founder and publisher of The Economic Collapse Blog. Michael’s controversial new book about Bible prophecy entitled “The Rapture Verdict” is available in paperback and for the Kindle on Amazon.com.* |
def test_tokens():
from easypy.tokens import AUTO, if_auto, MAX
def foo(p=AUTO):
return if_auto(p, 100)
assert foo() == 100
assert foo(5) == 5
assert foo(MAX) == MAX
assert MAX == "MAX"
assert MAX == "<MAX>"
assert MAX == "max"
assert MAX == "<max>"
d = {AUTO: AUTO, MAX: MAX}
assert d[AUTO] == AUTO
assert d[MAX] == MAX
assert d['<MAX>'] is MAX
assert 'AUTO' not in d
|
// Init registers the prometheus metrics for the measurement of the exporter itself.
func Init() {
prometheus.MustRegister(FunctionDurations)
prometheus.MustRegister(FunctionCountTotal)
} |
// GetLocalAddrList returns a list of local IP addresses
func GetLocalAddrList() ([]string, error) {
var addrList []string
intfList, err := net.Interfaces()
if err != nil {
return addrList, err
}
log.Debugf("Got interface list(%d): %+v", len(intfList), intfList)
for _, intf := range intfList {
if strings.HasPrefix(intf.Name, "docker") || strings.HasPrefix(intf.Name, "veth") ||
strings.HasPrefix(intf.Name, "vport") || strings.HasPrefix(intf.Name, "lo") {
continue
}
addrs, err := intf.Addrs()
if err != nil {
return addrList, err
}
for _, addr := range addrs {
addrList = append(addrList, addr.String())
}
}
return addrList, err
} |
// Copyright 2018. All rights reserved.
// This file is part of go2cache project
// Created by duguying on 2018/5/17.
package go2cache
import (
"testing"
"time"
)
func TestBroadcast(t *testing.T) {
go dial([]string{"127.0.0.1:9981", "192.168.2.227:60000"})
go listen("127.0.0.1:9981")
time.Sleep(time.Second * 3)
}
|
// children returns all child nodes of this node, each of which is a string or *jNode. It starts at offset 1 and skips the attribute map if one is present.
func children(n *jNode) []interface{} {
res := make([]interface{}, 0)
for i := 1; i < len(*n); i++ {
if _, isAttr := ((*n)[i]).(map[string]interface{}); isAttr {
continue
}
child := cast((*n)[i])
if child != nil {
res = append(res, child)
} else {
str := stringOf((*n)[i])
res = append(res, str)
}
}
return res
} |
package me.mervin.project.asRank.extract;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.HashSet;
import java.util.Set;
import me.mervin.util.D;
import me.mervin.util.FileTool;
/**
* Widgets.java
*
* @author Mervin.Wong DateTime 2014-4-18 9:12:33 AM
* @email:<EMAIL>
* @version 0.5.0
*/
public class Widgets {
private FileTool ft = new FileTool();
/**
*/
public Widgets() {
// TODO Auto-generated constructor stub
}
public static void main(String[] arg){
Widgets w = new Widgets();
w.splitByAS();
}
public void delFirsColumn(){
String srcDir = "/media/data/data/path/2014/";
String dstDir = "/media/data/data/path/2014/0";
String file = null;
//BufferedReader read = null;
RandomAccessFile f = null;
File[] fileArr1 = ft.fileArr(srcDir);
for (int i = 0; i < fileArr1.length; i++) {
File file2 = fileArr1[i];
File[] fileArr2 = file2.listFiles();
for (int j = 0; j < fileArr2.length; j++) {
File file3 = fileArr2[j];
D.p("read:"+file3.getAbsolutePath());
try {
StringBuffer sb = new StringBuffer();
//read = new BufferedReader(new FileReader(file3));
f = new RandomAccessFile(file3, "rw");
String line = null;
while((line = f.readLine()) != null){
sb.append(line.substring(line.indexOf("\t")+1, line.length())).append("\r\n");
}
f.close();
File f2 = new File(dstDir+file2.getName()+"/");
f2.mkdirs();
D.p("write:"+f2.getAbsolutePath()+"/"+file3.getName());
f = new RandomAccessFile(f2.getAbsolutePath()+"/"+file3.getName(), "rw");
f.writeBytes(sb.toString());
f.close();
} catch (Exception e) {
// TODO: handle exception
}
}
}
}
public void splitByAS(){
String srcDir = "/media/data/data/temp/path/";
String dstDir = "/media/data/data/path/";
File[] fileArr = ft.fileArr(srcDir);
for (int i = 0; i < fileArr.length; i++) {
File file = fileArr[i];
BufferedReader reader = null;
D.p("Read:"+file.getAbsolutePath());
try {
reader = new BufferedReader(new FileReader(file));
String line = null;
String name1 = null;
RandomAccessFile f = null;
Set<Integer> nodeSet = new HashSet<Integer>();
while((line = reader.readLine())!= null){
String name2 = line.substring(0, line.indexOf("\t"));
/* if(!name2.equals(name1)){
if(name1 != null){
f.close();
}
name1 = name2;
f = new RandomAccessFile(new File(dstDir+file.getName().substring(0, file.getName().indexOf("-"))+"/"+name2), "rw");
f.seek(f.length());
}
f.writeBytes(line+"\r\n");*/
nodeSet.add(Integer.parseInt(name2));
}
D.p(file.getAbsolutePath()+" "+nodeSet.size());
// f.close();
} catch (FileNotFoundException e) {
// TODO Auto-generated catch block
e.printStackTrace();
} catch (IOException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
}
}
}
|
from typing import Any, List, Tuple

def list_to_zip_range(data: List[Any]) -> List[Tuple[Any, Any]]:
zip_res = zip(data[:-1], data[1:])
result = [rearrange_pair(pair) for pair in zip_res if pair[0] != pair[1]]
return result |
Image (Unicef/Romenzi): This child was one of many who fled fighting near Mosul, Iraq, in 2017
The UN children's fund says the scale of attacks on children in the world's conflict zones reached "shocking" levels in 2017.
In a new report, Unicef said there was widespread and blatant disregard for international laws designed to protect the most vulnerable.
Unicef director Manuel Fontaine said children were being targeted in their homes, schools and playgrounds.
He said such brutality "cannot be the new normal".
The report highlights several conflict zones where it said children had suffered in the past year. These included:
In the Central African Republic, children were killed, raped, abducted and recruited by armed groups in a dramatic increase in violence
Islamist militants Boko Haram forced at least 135 children in north-east Nigeria and Cameroon to act as suicide bombers, almost five times the number in 2016
Muslim Rohingya children in Myanmar suffered "shocking and widespread violence" as they were driven from their homes in Rakhine state
In South Sudan, more than 19,000 children were recruited into armed forces and armed groups
Fighting in Yemen has left at least 5,000 children dead or injured according to official figures, with the real number expected to be much higher
In eastern Ukraine, 220,000 children are living under the constant threat from landmines and other unexploded devices left over from the war
"Children are being targeted and exposed to attacks and brutal violence in their homes, schools and playgrounds," said Mr Fontaine, Unicef's director of emergency programmes.
"As these attacks continue year after year, we cannot become numb. Such brutality cannot be the new normal."
Image (Unicef/Al-Issa): In Aleppo, Syria, Unicef gave children literature on reporting unexploded ordnance
The report says that in some places, children abducted by extremist groups suffer further abuse when they are released to security forces.
Millions more children suffer from malnutrition, disease and trauma when access to food, water, sanitation and health is prevented by fighting, it says.
"Unicef calls on all parties to conflict to abide by their obligations under international law to immediately end violations against children and the targeting of civilian infrastructure, including schools and hospitals," the report adds.
"Unicef also calls on states with influence over parties to conflict to use that influence to protect children." |
static char sccsid[] = "@(#)docomm.c 4.2 08/17/82";
#include <signal.h>
#include "head.h"
#include <a.out.h>
#include <stab.h>
struct user u;
L_INT cntval;
INT signo;
INT adrflg;
INT pid;
ADDR userpc;
char *s;
enum {NOCOM, PRCOM, DSCOM, DSICOM} lastcom;
/* last command: nothing noteworthy, print source,
display variable, display instruction */
docommand() {
register char *p;
register int i;
register ADDR addr, bkaddr;
struct proct *procp;
char s[4];
cntval = 1;
adrflg = 0;
errflg = 0;
if (scallf) {
doscall();
setcur(1);
lastcom = NOCOM;
return;
}
if (reflag) { /* search for regular expression */
dore();
lastcom = PRCOM;
return;
}
if (cmd == '\0') {
if (integ != 0 && var[0] != '\0') {
error("Invalid command (1)");
return;
}
if (integ != 0) { /* print line number */
ffind(integ);
fprint();
lastcom = PRCOM;
return;
}
if (var[0] != 0) {
printf("Unexpected null command\n");
return;
}
}
switch (cmd) {
case 'Y':
debug = !debug;
break;
case 'V':
version();
break;
case 'M':
if (args[0]) {
setmap(args);
} else {
printmap("? map", &txtmap);
printmap("/ map", &datmap);
}
break;
case 'x':
printregs();
break;
case 'X':
printpc();
break;
case 'a':
if (integ) {
cpstr(args, "l\n");
} else if (proc[0]) {
cpall(args, "T\n");
} else {
error("Bad arguments");
break;
}
goto setbrk;
break;
case 'l':
setcur(1);
lastcom = NOCOM;
break;
case 'T':
prfrx(1);
lastcom = NOCOM;
break;
case 't':
prframe();
lastcom = NOCOM;
break;
case 'e':
p = args;
if (*p == '\0') {
#ifndef FLEXNAMES
printf("%.16s() in \"%s\"\n",
curproc()->pname, curfile);
#else
printf("%s() in \"%s\"\n",
curproc()->pname, curfile);
#endif
break;
}
while (*p != '\0')
if (*p++ == '.') goto l1;
/* argument is procedure name */
procp = findproc(args);
if ((procp->pname[0] != '\0') && (procp->sfptr != badfile)) {
finit(adrtofilep(procp->paddr)->sfilename);
ffind(procp->lineno);
}
else printf("Can't find %s\n", args);
#ifndef FLEXNAMES
printf("%.16s() in \"%s\"\n", curproc()->pname, curfile);
#else
printf("%s() in \"%s\"\n", curproc()->pname, curfile);
#endif
lastcom = PRCOM;
break;
l1: /* argument is filename */
finit(args);
printf("\"%s\"\n", curfile);
lastcom = PRCOM;
break;
case 'p':
if (integ) ffind(integ);
fprint();
lastcom = PRCOM;
break;
case 'q':
exit(0);
case 'w':
if (integ) ffind(integ);
i = fline;
fback(WINDOW/2);
fprintn(WINDOW);
ffind(i);
lastcom = PRCOM;
break;
case 'Q':
prdebug();
break;
case 'z':
if (integ) ffind(integ);
fprintn(WINDOW);
lastcom = PRCOM;
break;
case '-':
fback(integ ? integ : 1);
fpargs();
lastcom = PRCOM;
break;
case '+':
fforward(integ ? integ : 1);
fpargs();
lastcom = PRCOM;
break;
case '\n':
switch (lastcom) {
case PRCOM:
fforward(1);
fprint();
break;
case DSCOM:
oaddr += oincr ? oincr : typetosize(otype, WORDSIZE);
printf("0x%x/ ", oaddr);
dispf((ADDR) oaddr, odesc,
oclass == N_RSYM ? oclass : N_GSYM, otype, 0, 0, DSP);
break;
case DSICOM:
dot += oincr;
prisploc();
dispi(dot, odesc, N_GSYM, 0, 0);
break;
}
break;
case '\004':
if (!isatty(0))
exit(0);
switch (lastcom) {
case PRCOM:
fforward(1);
printf("\b");
fprintn(WINDOW);
lastcom = PRCOM;
break;
case DSICOM:
printf("\b");
for (i=0; i<WINDOW; i++) {
dot += oincr;
prisploc();
if (dispi(dot, odesc, N_GSYM, 0, 0) == -1)
break;
}
break;
case DSCOM:
printf("\b");
for (i=0; i<WINDOW; i++) {
oaddr += oincr ?
oincr : typetosize(otype, WORDSIZE);
printf("0x%x/ ", oaddr);
if (dispf((ADDR) oaddr, odesc,
oclass == N_RSYM ? oclass :
N_GSYM, otype, 0, 0, DSP) == -1)
break;
}
break;
default:
printf("\n");
}
break;
case 'r':
if (args[0] == '\0') getargs();
case 'R':
signo = 0;
cpstr(oldargs, args);
if (debug) error("calling dopcs");
if (integ) cntval = integ;
if (!executing) {
executing = TRUE;
if (integ) cntval = integ;
dopcs('r');
executing = FALSE;
}
if (debug) error("exiting dopcs");
bkaddr = -1;
goto f1;
case 'c':
signo = 0;
case 'C':
if (proc[0] != '\0' || integ != 0) {
setdot();
if (dot == -1) {
error("Cannot set temporary breakpoint");
break;
}
dopcs('b');
bkaddr = dot;
} else
bkaddr = -1;
integ = atoi(args);
f1: if (debug) error("calling dopcs");
if (integ) cntval = integ;
dopcs('c');
if (debug) error("exiting dopcs");
if (bkaddr != -1) {
ADDR dotsave;
dotsave = dot;
dot = bkaddr;
dopcs('d');
dot = dotsave;
}
if (!signo) printf("Breakpoint");
printf(" at\n");
setcur(1);
lastcom = NOCOM;
break;
case 'S':
case 's':
signo = 0;
integ = atoi(args);
singstep(integ ? integ : 1, cmd);
if (signo) printf("\n");
setcur(1);
lastcom = NOCOM;
break;
case 'g':
if (pid == 0 || signo) {
error("Not stopped at breakpoint");
break;
}
setdot();
if (dot == -1) {
error("Bad address");
break;
}
adrflg = 1;
integ = atoi(args);
if (integ) cntval = integ;
dopcs('c');
if (!signo) printf("Breakpoint");
printf(" at\n");
setcur(1);
lastcom = NOCOM;
break;
case 'k':
if (scallx) {
userpc = dot = *(ADDR *)(((ADDR)&u)+PC) = pcs;
*(ADDR *)(((ADDR)&u)+FP) = fps;
*(ADDR *)(((ADDR)&u)+AP) = aps;
if (bkpts)
bkpts->flag = flagss;
scallx = 0;
error("Procedure killed");
longjmp(env, 0);
} else {
dopcs('k');
printf("\n");
lastcom = NOCOM;
break;
}
case 'B':
prbkpt();
break;
case 'b':
setbrk:
if (proc[0] == '\0' && integ == 0) {
integ = fline;
}
setdot();
if (dot == -1 || dot == 0) {
error("Cannot set breakpoint");
break;
}
dopcs('b');
s[0] = ' ';
s[1] = cmd;
s[2] = '\n';
s[3] = 0;
printbkpt(s, adrtoprocp(dot), dot);
break;
case 'd':
if (proc[0] == '\0' && integ == 0) {
idbkpt();
break;
}
setdot();
if (dot == -1) {
error("Non existent breakpoint");
break;
}
dopcs('d');
break;
case 'D':
dabkpt();
error("All breakpoints deleted");
break;
case 'm':
addr = varaddr(proc[0] ? proc : curproc()->pname, var);
printf("stopped with value %d\n", monex(addr, 'd'));
setcur(1);
lastcom = NOCOM;
break;
case '?':
if (!(var[0] == '.' && var[1] == '\0'))
setdot();
if (errflg) {
error(errflg);
break;
}
prisploc();
dispi(dot, args[0] ? args : "i", N_GSYM, 0, 0);
lastcom = DSICOM;
break;
case '/':
if (var[0] == '.' && var[1] == '\0') {
if (integ == 0) integ = oaddr;
dispf((ADDR) integ, args[0] ? args : odesc,
oclass == N_RSYM ? oclass : N_GSYM, otype, 0, 0, DSP);
oaddr = integ;
} else
if (integ && (var[0] == '\0')) {
dispf((ADDR) integ, args, N_GSYM, 0, 0, 0, DSP);
oaddr = integ;
cpstr(odesc, args);
oclass = N_GSYM;
otype = 0;
} else
dispvar(proc, var, args);
lastcom = DSCOM;
break;
case '=':
if (var[0] == '\0') {
if (proc[0]) {
addr = getaddr(proc, integ);
if (addr == -1) {
error("Unknown address");
break;
}
}
else
addr = integ;
dispf(addr, args[0] ? args : "x", 0, -1, 0, 0, DSP);
} else
findvar(proc, var, args[0] ? args : "x", 2);
break;
case '!':
if (var[0] == '\0')
addr = getaddr(proc, integ);
else
addr = varaddr(proc, var);
if (addr == -1)
error("Unknown variable");
else {
if (number(args[0]) || eqany(args[0], ".-")) {
char *p;
double atof();
union {
struct{
int w1, w2;
} ww;
double d;
} dbl;
p = (args[0] == '-') ? args+1 : args;
for (; *p != '.' && *p != 'e'; p++) {
if (!number(*p)) goto l2;
}
dbl.d = atof(args);
putval(addr, 'd', dbl.ww.w1);
if (typetodesc(sl_type,0)[0] == 'g')
putval(addr+WORDSIZE, 'd', dbl.ww.w2);
break;
}
l2: if (percentflag)
*(ADDR *)(((ADDR)&u)+addr) = argvalue(args);
else if (sl_class == N_RSYM && addr < 16)
putreg(addr,typetodesc(sl_type,subflag)[0],
argvalue(args));
else
putval(addr,typetodesc(sl_type,subflag)[0],
argvalue(args));
}
lastcom = NOCOM;
break;
case '"':
printf(args);
break;
}
}
fpargs() {
register int i;
switch(args[0]) {
case 'p':
case '\0':
fprint();
break;
case 'w':
i = fline;
fback(WINDOW/2);
fprintn(WINDOW);
ffind(i);
break;
case 'z':
fprintn(WINDOW);
break;
}
}
MSG BADTXT;
/* Used by a, b, c, C, d and g commands to find linenumber */
setdot() {
if (ncolonflag) {
dot = integ;
get(dot, ISP);
if (errflg)
dot = -1;
} else {
dot = getaddr(proc, integ);
if (dot == -1)
errflg = "Bad line number";
}
}
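/*
 * Quick reference (editorial summary derived from the switch in docommand() above, not
 * part of the original source):
 *
 *	e proc / e file.c	select a procedure or source file
 *	12p, w, z		print line 12, a window around it, or the next window of lines
 *	b / d / B / D		set, delete, list, or delete-all breakpoints
 *	r args, c, s, g		run with arguments, continue, single step, resume at a line
 *	var/d, addr?i, var!v	display a variable, display instructions, assign a value
 *	t, x, q			stack trace, registers, quit
 */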
|
import warnings

def is_string_nan(x, specific_nan_strings=[]):
if isinstance(x, str):
if (('other' in x.lower() and len(x) < 9)
or ('null' in x.lower() and len(x) < 7)
or (x.lower() == 'nan')
or ('discrepancy' in x.lower() and len(x) < 14)
or all([char == ' ' for char in x])
or all([char == '_' for char in x])
or all([char == '.' for char in x])
or ('unknown' in x.lower())
or ('not obtainable' in x.lower())
or ('not obtained' in x.lower())
or ('not applicable' in x.lower())
or ('not available' in x.lower())
or ('not evaluated' in x.lower())
or (x in specific_nan_strings)):
return True
else:
return False
else:
        warnings.warn(f"Found a non-string value of type {type(x)}. As we're "
                      "expecting a string, any other format will be considered "
                      "a missing value.")
return True |
package starportcmd
import (
"fmt"
"io"
"os"
"github.com/spf13/cobra"
"github.com/tendermint/starport/starport/pkg/entrywriter"
"github.com/tendermint/starport/starport/services/network/networktypes"
)
var LaunchSummaryHeader = []string{"launch ID", "chain ID", "source", "campaign ID", "network", "reward"}
// NewNetworkChainList returns a new command to list all published chains on Starport Network
func NewNetworkChainList() *cobra.Command {
c := &cobra.Command{
Use: "list",
Short: "List published chains",
Args: cobra.NoArgs,
RunE: networkChainListHandler,
}
return c
}
func networkChainListHandler(cmd *cobra.Command, args []string) error {
nb, err := newNetworkBuilder(cmd)
if err != nil {
return err
}
nb.Spinner.Stop()
n, err := nb.Network()
if err != nil {
return err
}
chainLaunches, err := n.ChainLaunchesWithReward(cmd.Context())
if err != nil {
return err
}
nb.Cleanup()
return renderLaunchSummaries(chainLaunches, os.Stdout)
}
// renderLaunchSummaries writes the list of summarized launches into the provided out writer
func renderLaunchSummaries(chainLaunches []networktypes.ChainLaunch, out io.Writer) error {
var launchEntries [][]string
for _, c := range chainLaunches {
campaign := "no campaign"
if c.CampaignID > 0 {
campaign = fmt.Sprintf("%d", c.CampaignID)
}
reward := entrywriter.None
if len(c.Reward) > 0 {
reward = c.Reward
}
launchEntries = append(launchEntries, []string{
fmt.Sprintf("%d", c.ID),
c.ChainID,
c.SourceURL,
campaign,
c.Network.String(),
reward,
})
}
return entrywriter.MustWrite(out, LaunchSummaryHeader, launchEntries...)
}
|
// Bool returns a pointer to a bool holding the value of ts. If ts is None then
// Bool returns nil.
func (ts *TriState) Bool() *bool {
var b bool
switch *ts {
case None:
return nil
case False:
b = false
case True:
b = true
}
return &b
} |
Environmental and biological controls on Na∕Ca ratios in scleractinian cold-water corals
Abstract. Here we present a comprehensive attempt to correlate aragonitic Na∕Ca ratios
from Desmophyllum pertusum (formerly known as Lophelia pertusa), Madrepora oculata and a caryophylliid cold-water coral (CWC) species with
different seawater parameters such as temperature, salinity and pH. Living
CWC specimens were collected from 16 different locations and analyzed for
their Na∕Ca ratios using solution-based inductively coupled plasma-optical
emission spectrometry (ICP-OES) measurements. The results reveal no apparent correlation with salinity (30.1–40.57 g kg−1) but a significant inverse correlation with temperature (−0.31 ± 0.04 mmol mol−1 °C−1). Other marine aragonitic organisms such as Mytilus edulis (inner
aragonitic shell portion) and Porites sp. exhibit similar results, highlighting the
consistency of the calculated CWC regressions. Corresponding Na∕Mg ratios
show a similar temperature sensitivity to Na∕Ca ratios, but the combination
of two ratios appears to reduce the impact of vital effects and
domain-dependent geochemical variation. The high degree of scatter and
elemental heterogeneities between the different skeletal features in both
Na∕Ca and Na∕Mg, however, limit the use of these ratios as a proxy and/or make
a high number of samples necessary. Additionally, we explore two models to
explain the observed temperature sensitivity of Na∕Ca ratios for an open and
semi-enclosed calcifying space based on temperature-sensitive Na- and Ca-pumping enzymes and transport proteins that change the composition of the
calcifying fluid and consequently the skeletal Na∕Ca ratio.
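A minimal sketch (Python) of how the reported sensitivity could be inverted to estimate temperature from a measured Na/Ca ratio; the slope is the value quoted above, while the intercept b0 is a hypothetical placeholder rather than a published calibration constant:

def temperature_from_na_ca(na_ca_mmol_mol, b0=20.0, slope=-0.31):
    # Invert a linear Na/Ca-temperature relation: Na/Ca = b0 + slope * T.
    # slope: -0.31 mmol/mol per degree C (from the abstract); b0: assumed intercept, not a published value.
    return (na_ca_mmol_mol - b0) / slope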
|
/**
* \cond
* Returns the name of the header in prefixed upper case form at the index
* provided, or NULL if the index is not valid.
* @param dataSet pointer to an initialised dataset
* @param httpHeaderIndex index of the HTTP header name required
* @returns name of the header, or NULL if index not valid
* \endcond
*/
const char* fiftyoneDegreesGetPrefixedUpperHttpHeaderName(const fiftyoneDegreesDataSet *dataSet, int httpHeaderIndex) {
const char *prefixedUpperHeaderName = NULL;
if (dataSet->prefixedUpperHttpHeaders == NULL) {
initPrefixedUpperHttpHeaderNames(dataSet);
}
if (httpHeaderIndex >= 0 &&
httpHeaderIndex < dataSet->httpHeadersCount) {
prefixedUpperHeaderName = dataSet->prefixedUpperHttpHeaders[httpHeaderIndex];
}
return prefixedUpperHeaderName;
} |
package main
import (
"log"
//"syscall/js"
"github.com/vugu/vugu"
"github.com/vugu/vugu/js"
)
func (c *Root) ExecFromGo(e vugu.DOMEvent) {
log.Printf("In ExecFromGo...\n")
//j := js.Global().Call("jQuery", "#jquery-test-label")
j := js.Global().Get("jQuery").New("#jquery-test-label")
grey := "#cccccc"
//j.SetCss("color", grey)
colorMap := make(map[string]interface{})
colorMap["color"] = grey
j.Call("css", js.ValueOf(colorMap))
t := j.Call("text")
log.Printf("(Before update, from WASM) text = %v\n", t)
j.Call("text", "Updated (from WASM)")
t = j.Call("text")
log.Printf("(After update, from WASM) text = %v\n", t)
}
func (c *Root) InvokeJS(e vugu.DOMEvent) {
log.Printf("In InvokeJS...\n")
js.Global().Get("helperFunc").Invoke()
}
|
#!/usr/bin/env python3
'''
Model for Riemannian feature calculation and classification for EEG data
'''
import numpy as np
import time, sys
import sklearn
import sklearn.neighbors  # needed for sklearn.neighbors.KNeighborsClassifier below
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.svm import LinearSVC, SVC
from sklearn.model_selection import KFold, train_test_split
from sklearn import metrics
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
# import self-defined functions
from riemannian_multiscale import riemannian_multiscale
from filters import load_filterbank
from get_data import get_data
__author__ = "<NAME> and <NAME>"
__email__ = "<EMAIL>,<EMAIL>"
class Riemannian_Model:
def __init__(self):
self.crossvalidation = False
self.data_path = 'dataset/'
self.svm_kernel = 'linear' #'sigmoid'#'linear' # 'sigmoid', 'rbf',
self.svm_c = 0.1 # for linear 0.1 (inverse),
self.NO_splits = 5 # number of folds in cross validation
self.fs = 250. # sampling frequency
self.NO_channels = 22 # number of EEG channels
self.NO_subjects = 9
		self.NO_riem = int(self.NO_channels*(self.NO_channels+1)/2) # Total number of Riemannian features per band and time window
self.bw = np.array([2,4,8,16,32]) # bandwidth of filtered signals
self.ftype = 'butter' # 'fir', 'butter'
self.forder= 2 # 4
self.filter_bank = load_filterbank(self.bw,self.fs,order=self.forder,max_freq=40,ftype = self.ftype) # get filterbank coeffs
time_windows_flt = np.array([[2.5,4.5],
[4,6],
[2.5,6],
[2.5,3.5],
[3,4],
[4,5]])*self.fs
self.time_windows = time_windows_flt.astype(int)
# restrict time windows and frequency bands
self.time_windows = self.time_windows[2:3] # use only largest timewindow
#self.f_bands_nom = self.f_bands_nom[18:27] # use only 4Hz-32Hz bands
self.rho = 0.1
self.NO_bands = self.filter_bank.shape[0]
self.NO_time_windows = self.time_windows.shape[0]
self.NO_features = self.NO_riem*self.NO_bands*self.NO_time_windows
self.riem_opt = "Riemann" # {"Riemann","Riemann_Euclid","Whitened_Euclid","No_Adaptation"}
# time measurements
self.train_time = 0
self.train_trials = 0
self.eval_time = 0
self.eval_trials = 0
def run_riemannian(self, ML_model):
################################ Training ############################################################################
start_train = time.time()
# 1. calculate features and mean covariance for training
riemann = riemannian_multiscale(self.filter_bank,self.time_windows,riem_opt =self.riem_opt,rho = self.rho,vectorized = True)
train_feat = riemann.fit(self.train_data)
# 2. Train SVM Model
X = train_feat
y = self.train_label
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
if ML_model == "RandomForestClassifier":
print("RandomForest Started")
clf = RandomForestClassifier()
clf.fit(X_train, y_train)
elif ML_model == "KNeighborsClassifier":
print("KNeighborsClassifier Started")
clf = sklearn.neighbors.KNeighborsClassifier()
clf.fit(X_train, y_train)
elif ML_model == "AdaBoostClassifier":
print("AdaBoostClassifier Started")
clf = sklearn.ensemble.AdaBoostClassifier()
clf.fit(X_train, y_train)
elif ML_model == "BaggingClassifier":
print("BaggingClassifier Started")
clf = sklearn.ensemble.BaggingClassifier()
clf.fit(X_train, y_train)
elif ML_model == "GaussianNB":
print("GaussianNB Started")
clf = sklearn.naive_bayes.GaussianNB()
clf.fit(X_train, y_train)
elif ML_model == "LDA":
print("LDA Started")
clf = LinearDiscriminantAnalysis()
clf.fit(X_train, y_train)
end_train = time.time()
self.train_time += end_train-start_train
self.train_trials += len(self.train_label)
################################# Evaluation ###################################################
start_eval = time.time()
eval_feat = riemann.features(self.eval_data)
success_rate = clf.score(eval_feat,self.eval_label)
end_eval = time.time()
#print("Time for one Evaluation " + str((end_eval-start_eval)/len(self.eval_label)) )
self.eval_time += end_eval-start_eval
self.eval_trials += len(self.eval_label)
return success_rate
def load_data(self):
if self.crossvalidation:
data,label = get_data(self.subject,True,self.data_path)
kf = KFold(n_splits=self.NO_splits)
split = 0
for train_index, test_index in kf.split(data):
if self.split == split:
self.train_data = data[train_index]
self.train_label = label[train_index]
self.eval_data = data[test_index]
self.eval_label = label[test_index]
split += 1
else:
self.train_data,self.train_label = get_data(self.subject,True,self.data_path)
self.eval_data,self.eval_label = get_data(self.subject,False,self.data_path)
def main():
all_ML_models = {"RandomForestClassifier": 1,
"AdaBoostClassifier": 1,
"BaggingClassifier" : 1,
"KNeighborsClassifier" : 1,
"GaussianNB": 1,
"LDA" : 1
}
for ML_model in all_ML_models:
if all_ML_models[ML_model]:
model = Riemannian_Model()
print("Number of used features: "+ str(model.NO_features))
print(model.riem_opt)
# success rate sum over all subjects
success_tot_sum = 0
if model.crossvalidation:
print("Cross validation run")
else:
print("Test data set")
start = time.time()
# Go through all subjects
for model.subject in range(1,model.NO_subjects+1):
#print("Subject" + str(model.subject)+":")
if model.crossvalidation:
success_sub_sum = 0
for model.split in range(model.NO_splits):
model.load_data()
success_sub_sum += model.run_riemannian(ML_model)
# average over all splits
success_rate = success_sub_sum/model.NO_splits
else:
# load Eval data
model.load_data()
success_rate = model.run_riemannian(ML_model)
print(success_rate)
success_tot_sum += success_rate
# Average success rate over all subjects
print("*"*100 + " \n\n\n\n results for %s \n\n\n\n" % (ML_model) +"*"*100 )
print("Average success rate: " + str(success_tot_sum/model.NO_subjects))
print("Training average time: " + str(model.train_time/model.NO_subjects))
print("Evaluation average time: " + str(model.eval_time/model.NO_subjects))
end = time.time()
print("Time elapsed [s] " + str(end - start))
if __name__ == '__main__':
main()
|
def find_constriction_template(self, similarity_table, constriction_in_reference):
gn = self.gn
parse = GPCRDBParsingPDB()
for structure, value in similarity_table.items():
this_anomaly = ProteinAnomaly.objects.filter(generic_number__label=gn)
anomaly_list = structure.protein_anomalies.all().prefetch_related()
if constriction_in_reference==True:
try:
for anomaly in this_anomaly:
if anomaly in anomaly_list:
gn_list = [parse.gn_indecer(gn,'x',-2), parse.gn_indecer(gn,'x',-1),
parse.gn_indecer(gn,'x',+1), parse.gn_indecer(gn,'x',+2)]
if self.check_range(gn_list,structure.protein_conformation,4)==0:
raise Exception()
alt_const = parse.fetch_residues_from_pdb(structure, gn_list)
self.template = structure
return alt_const
            except Exception:
pass
elif constriction_in_reference==False:
try:
suitable_temp = []
for anomaly in this_anomaly:
if anomaly not in anomaly_list:
pass
else:
suitable_temp.append('no')
if 'no' not in suitable_temp:
gn_list = [parse.gn_indecer(gn,'x',-2), parse.gn_indecer(gn,'x',-1), gn,
parse.gn_indecer(gn,'x',+1), parse.gn_indecer(gn,'x',+2)]
if self.check_range(gn_list,structure.protein_conformation,5)==0:
raise Exception()
alt_const = parse.fetch_residues_from_pdb(structure, gn_list)
self.template = structure
return alt_const
            except Exception:
pass
return None |
import numpy as np
import torch
from torch.autograd import Variable


# Note: Batch is assumed to be defined elsewhere (a wrapper around src, trg and pad).
def data_gen(voc_size, batch, nbatches, seq_len = 15):
for i in range(nbatches):
data = torch.from_numpy(
np.random.randint(1, voc_size, size=(batch, seq_len)))
data[:, 0] = 1
src = Variable(data, requires_grad=False)
tgt = Variable(data, requires_grad=False)
yield Batch(src=src, trg=tgt, pad=0) |
/*
* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright 2019-2021, <NAME> <<EMAIL>>
*/
#include <getopt.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "parse.h"
#include "utils.h"
static const struct option opts[] = {
{"lines", required_argument, NULL, 'n'},
{"show", no_argument, NULL, 's'},
{"show-full", no_argument, NULL, 'S'},
{"version", no_argument, NULL, 'V'},
{"help", no_argument, NULL, 'h'},
{NULL, 0, NULL, 0},
};
static void
usage(FILE *out)
{
fprintf(out, "Usage: csv-head [OPTION]...\n");
fprintf(out, "Print the first 10 rows of CSV file from standard input to standard output.\n");
fprintf(out, "\n");
fprintf(out, "Options:\n");
fprintf(out,
" -n, --lines=NUM print the first NUM rows instead of the first 10,\n"
" NUM must be >= 0\n");
describe_Show(out);
describe_Show_full(out);
describe_help(out);
describe_version(out);
}
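/*
 * Example invocation (sketch): using the -n option defined above to keep only
 * the first 5 rows of a CSV stream, e.g.
 *
 *	csv-producer | csv-head -n 5 > first5.csv
 *
 * where csv-producer stands in for any command emitting CSV on stdout.
 */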
struct cb_params {
size_t lines;
size_t printed;
};
static int
next_row(const char *buf, const size_t *col_offs, size_t ncols, void *arg)
{
struct cb_params *params = arg;
if (params->printed >= params->lines)
return 1;
params->printed++;
csv_print_line(stdout, buf, col_offs, ncols, true);
return 0;
}
int
main(int argc, char *argv[])
{
int opt;
struct cb_params params;
unsigned show_flags = SHOW_DISABLED;
params.lines = 10;
params.printed = 0;
while ((opt = getopt_long(argc, argv, "n:sS", opts, NULL)) != -1) {
switch (opt) {
case 'n':
if (strtoul_safe(optarg, ¶ms.lines, 0))
exit(2);
break;
case 's':
show_flags |= SHOW_SIMPLE;
break;
case 'S':
show_flags |= SHOW_FULL;
break;
case 'V':
printf("git\n");
return 0;
case 'h':
default:
usage(stdout);
return 2;
}
}
csv_show(show_flags);
struct csv_ctx *s = csv_create_ctx_nofail(stdin, stderr);
csv_read_header_nofail(s);
const struct col_header *headers;
size_t nheaders = csv_get_headers(s, &headers);
csv_print_headers(stdout, headers, nheaders);
if (csv_read_all(s, &next_row, ¶ms) < 0)
exit(2);
csv_destroy_ctx(s);
return 0;
}
|
package com.revolsys.gis.parallel;
import org.springframework.core.convert.converter.Converter;
import com.revolsys.parallel.channel.Channel;
import com.revolsys.parallel.process.BaseInOutProcess;
import com.revolsys.record.Record;
public class ConverterProcess extends BaseInOutProcess<Record, Record> {
private Converter<Record, Record> converter;
public ConverterProcess() {
}
public ConverterProcess(final Converter<Record, Record> converter) {
this.converter = converter;
}
public Converter<Record, Record> getConverter() {
return this.converter;
}
@Override
protected void process(final Channel<Record> in, final Channel<Record> out, final Record object) {
if (this.converter != null) {
final Record target = this.converter.convert(object);
out.write(target);
}
}
public void setConverter(final Converter<Record, Record> converter) {
this.converter = converter;
}
}
|
/**
* Class ConsoleClientMonitor
*
* @author Paul Hammant
* @version * $Revision: 1.2 $
*/
public class ConsoleClientMonitor implements ClientMonitor {
private final ClientMonitor delegate;
public ConsoleClientMonitor(ClientMonitor delegate) {
this.delegate = delegate;
}
public ConsoleClientMonitor() {
this(new NullClientMonitor());
}
public void methodCalled(Class clazz, final String methodSignature, final long duration, String annotation) {
delegate.methodCalled(clazz, methodSignature, duration, annotation);
}
public boolean methodLogging() {
return false;
}
public void serviceSuspended(Class clazz, final Request request, final int attempt, final int suggestedWaitMillis) {
System.out.println("ConsoleClientMonitor: serviceSuspended: for class'" + clazz.getName() + "' attempt: '" + attempt + "' waiting: '" + suggestedWaitMillis + "'" );
delegate.serviceSuspended(clazz, request, attempt, suggestedWaitMillis);
}
public void serviceAbend(Class clazz, int attempt, IOException cause) {
System.out.println("ConsoleClientMonitor: serviceAbend: for class'" + clazz.getName() + "' attempt: '" + attempt + "' IOException: '" + cause.getMessage() + "'" );
cause.printStackTrace();
delegate.serviceAbend(clazz, attempt, cause);
}
public void invocationFailure(Class clazz, String publishedServiceName, String objectName, String methodSignature, InvocationException ie) {
System.out.println("ConsoleClientMonitor: invocationFailure: for class'" + clazz.getName() + "' publishedServiceName: '" + publishedServiceName +
"' objectName: '" + objectName +
"' methodSignature: '" + methodSignature +
"' InvocationException: '" + ie.getMessage() + "'" );
ie.printStackTrace();
delegate.invocationFailure(clazz, publishedServiceName, objectName, methodSignature, ie);
}
public void unexpectedConnectionClosed(Class clazz, String name, ConnectionClosedException cce) {
System.out.println("ConsoleClientMonitor: unexpectedClosedConnection: for class'" + clazz.getName() + "' name: '" + name + "' ConnectionClosedException: '" + cce.getMessage() + "'" );
cce.printStackTrace();
delegate.unexpectedConnectionClosed(clazz, name, cce);
}
public void unexpectedInterruption(Class clazz, String name, InterruptedException ie) {
System.out.println("ConsoleClientMonitor: unexpectedInterruption: for class'" + clazz.getName() + "' name: '" + name + "'");
ie.printStackTrace();
delegate.unexpectedInterruption(clazz, name, ie);
}
public void classNotFound(Class clazz, String msg, ClassNotFoundException cnfe) {
System.out.println("ConsoleClientMonitor: classNotFound: for class'" + clazz.getName() + "' msg: '" + msg + "' ClassNotFoundException: '" + cnfe.getMessage() + "'" );
cnfe.printStackTrace();
delegate.classNotFound(clazz, msg, cnfe);
}
public void unexpectedIOException(Class clazz, String msg, IOException ioe) {
System.out.println("ConsoleClientMonitor: unexpectedIOException: for class'" + clazz.getName() + "' msg: '" + msg + "' IOException: '" + ioe.getMessage() + "'" );
ioe.printStackTrace();
delegate.unexpectedIOException(clazz, msg, ioe);
}
public void pingFailure(Class clazz, JRemotingException jre) {
System.out.println("ConsoleClientMonitor: pingFailure: for class'" + clazz.getName() + "' JRemotingException: '" + jre.getMessage() + "'" );
}
} |
// Access the tltodollar endpoint in the ConvertOperation module
@GetMapping("/Endpoint")
public String convertOperation() {
String result = restTemplate.getForObject(convertEndPoint,String.class);
return result;
} |
package graphor
import (
"github.com/nosukeru/graphor/auth"
"github.com/nosukeru/graphor/database"
)
var __graphor graphorInterface
func InitializeGraphor() error {
var err error
__graphor, err = newGraphor()
return err
}
func Auth() auth.Auth {
return __graphor.Auth()
}
func db() database.Database {
return __graphor.DB()
}
func Save(model Model, schema Schema) {
__graphor.Save(model, schema)
}
func Delete(model Model) {
__graphor.Delete(model)
}
func HardDelete(model Model) {
__graphor.HardDelete(model)
}
func Mutate(execute func() error) error {
return __graphor.Mutate(execute)
}
func ClearDatabase() error {
return __graphor.ClearDatabase()
}
func MigrateDatabase(body string) error {
return __graphor.MigrateDatabase(body)
}
func ReverseEdge(edge string) string {
return __graphor.ReverseEdge(edge)
}
func IsReversed(edge string) bool {
return __graphor.IsReversed(edge)
}
func BaseMigrations(schemaList []Schema) string {
return __graphor.BaseMigrations(schemaList)
}
func DecodeString(x interface{}) string {
return decodeString(x)
}
func DecodeInt(x interface{}) int {
return decodeInt(x)
}
|
/**
* Our main representable analog. Instances of this class deliver the set of identity morphisms, or
* deliver sets of compositions of morphisms representing new group homomorphisms as functors. More plainly, an array of iterators is returned representing the
 * N return tuple '?' elements of the query. If it's an identity morphism (instance of Morphism) of three keys (as in the *,*,* query)
* then N = 1 for returned Comparable elements in the stream, since 1 full tuple element is streamed, that being the identity morphism.
* For tuples the array size is relative to the '?' query predicates. <br/>
 * Here, the subset from the beginning template parameters to the ending template parameters is retrieved.
* The critical element about retrieving relationships is to remember that the number of elements from each passed
* element of a RelatrixStream is dependent on the number of "?" operators in a 'findSetStream'. For example,
* if we declare findHeadSetStream("*","?","*") we get back a Comparable[] of one element. For findSetStream("?",object,"?") we
* would get back a Comparable[2] array, with each element of the array containing the relationship returned.<br/>
* @author Joonathan Groff Copyright (C) NeoCoreTechs 2014,2015(iterator), 2021 (stream)
*
*/
public class RelatrixSubsetStream<T> implements Stream<T> {
protected Stream stream;
protected Morphism buffer = null;
protected short dmr_return[] = new short[4];
protected boolean identity = false;
/**
* Pass the array we use to indicate which values to return and element 0 counter
* @param dmr_return
* @throws IOException
*/
public RelatrixSubsetStream(Morphism template, Morphism template2, short[] dmr_return) throws IOException {
this.dmr_return = dmr_return;
identity = RelatrixStream.isIdentity(this.dmr_return);
try {
stream = RelatrixKV.findSubMapStream(template, template2);
} catch (IllegalArgumentException | ClassNotFoundException | IllegalAccessException e) {
throw new IOException(e);
}
}
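	/*
	 * Illustrative sketch only (not a confirmed API call): per the class comment, the number of
	 * Comparable elements delivered per stream element tracks the number of '?' operators in the
	 * originating find call. A hypothetical findSubSetStream("?", mapObj, "?", from, to) would
	 * stream Comparable[2] arrays, while the identity form ("*","*","*") streams the Morphism itself.
	 */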
@Override
public Iterator<T> iterator() {
return stream.iterator();
}
@Override
public Spliterator<T> spliterator() {
return stream.spliterator();
}
@Override
public boolean isParallel() {
return stream.isParallel();
}
@Override
public Stream<T> sequential() {
return (Stream<T>) stream.sequential();
}
@Override
public Stream<T> parallel() {
return (Stream<T>) stream.parallel();
}
@Override
public Stream<T> unordered() {
return (Stream<T>) stream.unordered();
}
@Override
public Stream<T> onClose(Runnable closeHandler) {
return (Stream<T>) stream.onClose(closeHandler);
}
@Override
public void close() {
stream.close();
}
@Override
public Stream<T> filter(Predicate<? super T> predicate) {
return stream.filter(predicate);
}
@Override
public <R> Stream<R> map(Function<? super T, ? extends R> mapper) {
return stream.map(mapper);
}
@Override
public IntStream mapToInt(ToIntFunction<? super T> mapper) {
return stream.mapToInt(mapper);
}
@Override
public LongStream mapToLong(ToLongFunction<? super T> mapper) {
return stream.mapToLong(mapper);
}
@Override
public DoubleStream mapToDouble(ToDoubleFunction<? super T> mapper) {
return stream.mapToDouble(mapper);
}
@Override
public <R> Stream<R> flatMap(Function<? super T, ? extends Stream<? extends R>> mapper) {
return stream.flatMap(mapper);
}
@Override
public IntStream flatMapToInt(Function<? super T, ? extends IntStream> mapper) {
return stream.flatMapToInt(mapper);
}
@Override
public LongStream flatMapToLong(Function<? super T, ? extends LongStream> mapper) {
return stream.flatMapToLong(mapper);
}
@Override
public DoubleStream flatMapToDouble(Function<? super T, ? extends DoubleStream> mapper) {
return stream.flatMapToDouble(mapper);
}
@Override
public Stream<T> distinct() {
return stream.distinct();
}
@Override
public Stream<T> sorted() {
return stream.sorted();
}
@Override
public Stream<T> sorted(Comparator<? super T> comparator) {
return stream.sorted(comparator);
}
@Override
public Stream<T> peek(Consumer<? super T> action) {
return stream.peek(action);
}
@Override
public Stream<T> limit(long maxSize) {
return stream.limit(maxSize);
}
@Override
public Stream<T> skip(long n) {
return stream.skip(n);
}
@Override
public void forEach(Consumer<? super T> action) {
stream.forEach(action);
}
@Override
public void forEachOrdered(Consumer<? super T> action) {
stream.forEachOrdered(action);
}
@Override
public Object[] toArray() {
return stream.toArray();
}
@Override
public <A> A[] toArray(IntFunction<A[]> generator) {
return (A[]) stream.toArray(generator);
}
@Override
public T reduce(T identity, BinaryOperator<T> accumulator) {
		return (T) stream.reduce(identity, accumulator);
}
@Override
public Optional<T> reduce(BinaryOperator<T> accumulator) {
return stream.reduce(accumulator);
}
@Override
public <U> U reduce(U identity, BiFunction<U, ? super T, U> accumulator, BinaryOperator<U> combiner) {
		return (U) stream.reduce(identity, accumulator, combiner);
}
@Override
public <R> R collect(Supplier<R> supplier, BiConsumer<R, ? super T> accumulator, BiConsumer<R, R> combiner) {
return (R) stream.collect(supplier, accumulator, combiner);
}
@Override
public <R, A> R collect(Collector<? super T, A, R> collector) {
return (R) stream.collect(collector);
}
@Override
public Optional<T> min(Comparator<? super T> comparator) {
return stream.min(comparator);
}
@Override
public Optional<T> max(Comparator<? super T> comparator) {
return stream.max(comparator);
}
@Override
public long count() {
return stream.count();
}
@Override
public boolean anyMatch(Predicate<? super T> predicate) {
return stream.anyMatch(predicate);
}
@Override
public boolean allMatch(Predicate<? super T> predicate) {
return stream.allMatch(predicate);
}
@Override
public boolean noneMatch(Predicate<? super T> predicate) {
return stream.noneMatch(predicate);
}
@Override
public Optional<T> findFirst() {
return stream.findFirst();
}
@Override
public Optional<T> findAny() {
return stream.findAny();
}
/**
* iterate_dmr - return proper domain, map, or range
* based on dmr_return values. In dmr_return, value 0
* is iterator for ?,*. 1-3 BOOLean for d,m,r return yes/no
* @return the next location to retrieve or null, the only time its null is when we exhaust the buffered tuples
* @throws IOException
* @throws IllegalAccessException
*/
private Comparable[] iterateDmr() throws IllegalAccessException, IOException
{
int returnTupleCtr = 0;
Comparable[] tuples = new Comparable[RelatrixStream.getReturnTuples(dmr_return)];
//System.out.println("IterateDmr "+dmr_return[0]+" "+dmr_return[1]+" "+dmr_return[2]+" "+dmr_return[3]);
// no return vals? send back Relate location
if( identity ) {
tuples[0] = buffer;
return tuples;
}
dmr_return[0] = 0;
for(int i = 0; i < tuples.length; i++)
tuples[i] = buffer.iterate_dmr(dmr_return);
return tuples;
}
} |
for _ in range (int(input())):
n,m=map(int,input().split())
arr=[]
for i in range(n):
a,b=map(int,input().split())
c,d=map(int,input().split())
arr.append((a,b,c,d))
if(m%2==1):print("NO")
else:
b,x=0,1
for i in range(n):
if(arr[i][1]==arr[i][2]):b=1
if(b==0):x=0
b=0
if(m>=4):
for i in range(n):
for j in range(n):
if(arr[i][1]==arr[j][2] and arr[i][2]==arr[j][1] and arr[i][0]==arr[j][0] and arr[i][3]==arr[j][3]):b=1
if(b==0):x=0
if(x==0):print("NO")
else:print("YES") |
// Get gets the execStat for an executor.
func (e *RuntimeStatsColl) Get(planID string) *RuntimeStats {
e.mu.Lock()
defer e.mu.Unlock()
runtimeStats, exists := e.stats[planID]
if !exists {
runtimeStats = &RuntimeStats{}
e.stats[planID] = runtimeStats
}
return runtimeStats
} |
package com.cambrian.common.constant;
/**
* @author kuma 2021-01-03
*/
public class AuthServerConstants {
public static final String SERVICE_CODE = "auth";
public static final class CacheKey {
private CacheKey() {
throw new UnsupportedOperationException("Until class");
}
public static final String VERIFICATION_PREFIX = SERVICE_CODE + ":verifi:";
}
public static final String SESSION_KEY_LOGIN_USER = "loginUser";
}
|
/**
* A local SOCKS5 server which runs as a singleton on the local machine on port 1080 (default SOCKS port).
*
* @author Christian Schudt
*/
final class LocalSocks5Server {
private static final Logger logger = Logger.getLogger(LocalSocks5Server.class.getName());
final Collection<String> allowedAddresses = new CopyOnWriteArrayList<>();
private int port = 1080; // The default port for SOCKS5.
private volatile ServerSocket serverSocket;
private final Map<String, Socket> socketMap = new ConcurrentHashMap<>();
/**
* Starts the local SOCKS5 server.
*/
public void start() {
if (serverSocket == null) {
// Use double-checked locking idiom.
synchronized (this) {
if (serverSocket == null) {
try {
serverSocket = new ServerSocket(getPort());
Thread thread = new Thread(() -> {
while (serverSocket != null) {
Socket socket = null;
try {
socket = serverSocket.accept();
socketMap.put(Socks5Protocol.establishServerConnection(socket, allowedAddresses), socket);
} catch (IOException e) {
if (socket != null) {
try {
socket.close();
} catch (IOException e1) {
logger.log(Level.WARNING, e.getMessage(), e);
}
}
}
}
});
thread.setDaemon(true);
thread.start();
} catch (IOException e) {
logger.log(Level.WARNING, e.getMessage(), e);
}
}
}
}
}
/**
* Stops the server.
*/
public synchronized void stop() {
if (serverSocket != null) {
try {
// This will close the socket and interrupts the accept() method.
serverSocket.close();
serverSocket = null;
} catch (IOException e) {
logger.log(Level.WARNING, e.getMessage(), e);
}
}
}
/**
* Gets the port. If no port has been set, the default port (1080) is returned.
*
* @return The port.
*/
public int getPort() {
return port;
}
/**
* Sets the port this local server will run on.
*
* @param port The port.
*/
public void setPort(int port) {
this.port = port;
}
/**
	 * Gets the address of the local host.
	 *
	 * @return The local host address.
*/
public String getAddress() throws XmppException {
try {
return InetAddress.getLocalHost().getHostAddress();
} catch (UnknownHostException e) {
throw new XmppException(e);
}
}
/**
* Gets the socket for the destination address.
*
* @param destinationAddress The destination address.
* @return The socket.
*/
public Socket getSocket(String destinationAddress) {
return socketMap.remove(destinationAddress);
}
} |
/**
* Sample for given number of iterations
* @param numIters Number of iterations
* @throws IOException IOException
*/
public void sample(int numIters) throws IOException
{
for (int iteration=1; iteration<=numIters; iteration++)
{
for (int lang=0; lang<param.numLangs; lang++)
{
for (int doc=0; doc<numDocs[lang]; doc++)
{
sampleDoc(doc, lang, iteration>50);
}
computeLogLikelihood(lang);
perplexity[lang]=Math.exp(-logLikelihood[lang]/numTestWords[lang]);
if (param.verbose)
{
if (lang==0)
{
IOUtil.print("<"+iteration+">");
}
IOUtil.println("\tLang "+lang+" Log-LLD: "+format(logLikelihood[lang])+
"\tPPX: "+format(perplexity[lang]));
}
if (param.updateAlpha && iteration%param.updateAlphaInterval==0 && type==TRAIN)
{
updateHyperParam(lang);
}
if (type==TRAIN)
{
computeOmega(lang);
computeWordWeights(lang);
updateRho(lang);
computeTOmega(lang);
}
}
if (param.verbose && type==TRAIN)
{
for (int l1=0; l1<param.numLangs; l1++)
{
for (int l2=0; l2<param.numLangs; l2++)
{
if (l1==l2) continue;
double avgEntropy=0.0;
for (int t2=0; t2<param.numTopics[l2]; t2++)
{
double entropy=0.0;
for (int t1=0; t1<param.numTopics[l1]; t1++)
{
if (rho[l1][l2][t1][t2]>0.0)
{
entropy-=rho[l1][l2][t1][t2]*Math.log(rho[l1][l2][t1][t2]);
}
}
avgEntropy+=entropy;
}
IOUtil.println("\tAvg Entropy: "+avgEntropy/param.numTopics[l2]);
double avgScore=0.0;
double avgTopScore=0.0;
for (int t1=0; t1<param.numTopics[l1]; t1++)
{
double max=Double.MIN_VALUE;
for (int t2=0; t2<param.numTopics[l2]; t2++)
{
avgScore+=rho[l1][l2][t1][t2];
if (rho[l1][l2][t1][t2]>max)
{
max=rho[l1][l2][t1][t2];
}
}
avgTopScore+=max;
}
avgScore/=(double)param.numTopics[l1]*param.numTopics[l2];
avgTopScore/=(double)param.numTopics[l1];
IOUtil.println("\tAvg Link Score of Languages "+l1+" and "+l2+": "+avgScore);
IOUtil.println("\tAvg Top Link Score of Languages "+l1+" and "+l2+": "+avgTopScore);
IOUtil.println("\tRatio of Top to All of Languages "+l1+" and "+l2+": "+Math.abs(avgTopScore/avgScore));
}
}
}
}
if (type==TRAIN && param.verbose)
{
for (int lang=0; lang<param.numLangs; lang++)
{
for (int topic=0; topic<param.numTopics[lang]; topic++)
{
IOUtil.println(topWordsByFreq(lang, topic, param.numTopWords));
}
}
}
} |
import { defineConfig } from 'vite'
import vue from '@vitejs/plugin-vue'
import legacy from '@vitejs/plugin-legacy'
import { resolve } from 'path'
// https://vitejs.dev/config/
export default defineConfig({
  // Array of plugins to use
plugins: [
vue(),
legacy({
targets: ['defaults', 'not IE 11']
})
],
  // Public base path served in development or production. It can be '/foo/', 'https://foo.com/', an empty string, or './' (for development). This option can also be specified on the command line (e.g. vite build --base=/my/public/path/)
base: './',
  // Folder to serve static assets from, defaults to "public"
publicDir: 'public',
resolve: {
alias: {
'@src': resolve(__dirname, './src')
}
},
server: {
open: false,
https: false,
proxy: {
'/api': {
target: 'http://www.api.com',
changeOrigin: true
}
}
},
build: {
    // Output directory, defaults to 'dist'
outDir: 'dist',
    // Directory (relative to build.outDir) in which to place generated assets
assetsDir: 'assets',
    // Imported or referenced assets smaller than this threshold are inlined as base64; set to 0 to disable. Defaults to 4096 (4 KB)
assetsInlineLimit: 4096,
    // Enable/disable CSS code splitting; if disabled, all CSS in the project is extracted into a single CSS file. Defaults to true
cssCodeSplit: true,
    // Whether to generate source map files after the build, defaults to false
sourcemap: false,
    // When true, a manifest.json file is generated for backend integration
manifest: false
}
})
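// Usage note (illustrative): with the '@src' alias defined above, application code can import
// modules as `import MyComponent from '@src/components/MyComponent.vue'` (hypothetical path).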
|
def save(self):
settings_before = get_all_data()
settings.update()
settings.hide()
settings_after = get_all_data()
if (
settings_before[Settings.PC] != settings_after[Settings.PC] or
settings_before[Settings.ENCRYPTION] != settings_after[Settings.ENCRYPTION] or
settings_before[Settings.PASS] != settings_after[Settings.PASS]
):
self.stop_serverclient()
self.init_serverclient(settings_after[Settings.PC]) |
import numpy as np


def create_hcp_positions(l):
    """Return sphere-centre positions on a hexagonal close-packed lattice
    (nearest-neighbour distance 1, i.e. sphere radius 0.5) filling a box of side l."""
    r = 0.5
    row_height_shift = r * np.sqrt(3)                # spacing between rows within a plane
    plane_height_shift = np.sqrt(6) * r * 2.0 / 3.0  # spacing between stacked planes
    n_row = int(np.floor(l))
    # Base row of centres along x; rows and planes are offset according to the HCP stacking.
    row0 = np.array([np.arange(n_row), np.zeros(n_row), np.zeros(n_row)]).T
    plane0 = np.array([row0 + np.array([r if i % 2 == 1 else 0, i * row_height_shift, 0])
                       for i in range(int(np.floor(l / row_height_shift)))])
    planes = np.array([plane0 + np.array([r if i % 2 == 1 else 0,
                                          np.sqrt(3) / 3 * r if i % 2 == 1 else 0,
                                          i * plane_height_shift])
                       for i in range(int(np.floor(l / plane_height_shift)))])
    sites = planes.reshape((np.prod(planes.shape[:3]), 3))
    return sites
/// Commit all created properties.
///
/// This will also clear the property buffer.
pub fn commit_all(&mut self) -> Result<(), StateErr> {
for (key, space) in self.properties.drain() {
Self::commit_pair(self.store_fn, self.handle, key, space)?;
}
Ok(())
} |
def match(self, charge):
for expr in self.exprs:
if expr.match(charge):
return True
return False |
/******************************************************************************
* Function name : InitialiseLCD
* Description : Initializes the LCD display.
* Arguments : none
* Return Value : none
******************************************************************************/
void InitialiseLCD(void)
{
if (GlyphOpen(&G_lcd, 0) == GLYPH_ERROR_NONE)
{
GlyphNormalScreen(G_lcd) ;
GlyphSetFont(G_lcd, GLYPH_FONT_8_BY_8) ;
GlyphClearScreen(G_lcd) ;
}
} |
/*
* ProcWaitForSignal - wait for a signal from another backend.
*
* As this uses the generic process latch the caller has to be robust against
* unrelated wakeups: Always check that the desired state has occurred, and
* wait again if not.
*/
void
ProcWaitForSignal(void)
{
WaitLatch(MyLatch, WL_LATCH_SET, 0);
ResetLatch(MyLatch);
CHECK_FOR_INTERRUPTS();
} |
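/*
 * Caller-side sketch (not part of this file): because the latch can be set for
 * unrelated reasons, the waiting backend must loop until the condition it is
 * actually waiting for becomes true.
 *
 *	for (;;)
 *	{
 *		if (condition_satisfied())	// hypothetical check of the desired state
 *			break;
 *		ProcWaitForSignal();
 *	}
 */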
package org.ssssssss.magicapi.spring.boot.starter;
/**
 * JSON result code configuration
* @since 1.1.2
*/
public class ResponseCodeConfig {
/**
	 * Code value for successful execution
*/
private int success = 1;
/**
	 * Code value when parameter validation fails
*/
private int invalid = 0;
/**
	 * Code value when execution throws an exception
*/
private int exception = -1;
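	// Illustrative only: with these defaults, a response body might look like {"code": 1, ...} on
	// success, {"code": 0, ...} when validation fails, and {"code": -1, ...} on an exception.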
public int getSuccess() {
return success;
}
public void setSuccess(int success) {
this.success = success;
}
public int getInvalid() {
return invalid;
}
public void setInvalid(int invalid) {
this.invalid = invalid;
}
public int getException() {
return exception;
}
public void setException(int exception) {
this.exception = exception;
}
}
|
t=int(input())
for i in range(t):
a=int(input())
b=input()
if "8" not in b:
print("NO")
else:
c=b.index("8")
d=len(b)-c
if d>=11:
print("YES")
d=0
else:
print("NO")
d=0 |
We all know San Diego Comic-Con is more than just comics. Yet despite all the great panels and events that happen throughout the convention, nothing gets quite the attention than a big Hollywood reveal on the stage of the Convention Center’s Hall H. Tom Hiddleston as Loki? Warner Bros. taking the wraps off of Godzilla for the first time? The Avengers presented on stage with Joss Whedon? Those moments and more might not be your personal favorites, but they’re the moments everyone remembers and talks about for years.
Every year, we take a gander at the theatrical release schedule and put down our best guesses as to which films might be filling up Hall H in July.
1. Hercules
Release Date: July 25, 2014
We already know Hercules will have a presence at SDCC, because it was revealed by its production company, way back in January. But the question here is, what kind of presence? Its release date puts it in a difficult situation. Should it have a panel, it would only make sense to hold it on Thursday, the day prior to its theatrical release. Or as we’ve seen in the past with movies released during Comic-Con, perhaps it might have a special Thursday premiere showing with cast and crew, or some other offsite promotion. Nonetheless, whether we’ll be seeing star Dwayne Johnson or director Brett Ratner in support of the film, we will definitely be seeing Hercules in some form during the weekend.
2. Guardians of the Galaxy
Release Date: August 1, 2014
We’ve been hearing that Marvel will be pushing Guardians of the Galaxy hard during Comic-Con this year. If not an early screening during the convention, perhaps the company will give Hall H attendees an extended sneak preview during its annual panel. After all, Marvel showed an early look of the characters last year after only filming for two weeks. Expect to be humming “Hooked on a Feeling” throughout the weekend.
3. Into the Storm
Release Date: August 8, 2014
Here’s another film we know will be at Comic-Con, thanks to star Richard Armitage who mentioned it during an interview this year. Distributed by Warner Bros., it was already at WonderCon back in April, so we’re left wondering if this will even be at the studio’s big panel event. It’s also not a great fit genre-wise. Sure, it has a fan-favorite star and big special effects, but this is Comic-Con, not Meteorologist-Con. Warner Bros. sure does like to pack its panels full of whatever it has on its upcoming release schedule, so who knows. It could be a trailer and a quick Q&A, or maybe just a booth appearance. Whatever it is, Armitage will be there. Whether that bodes well for another film he’ll be in later this year, we’ll see…
4. Lucy
Release Date: July 25, 2014
Here’s another film with a release date smack dab in the middle of Comic-Con. And, like Into The Storm, it’s another film that already had a panel at WonderCon last month. We’d count this out for the aforementioned reasons if it weren’t for two things: One, it’s got Scarlett Johansson, who, although in the middle of a pregnancy, might also be in San Diego for The Avengers: Age of Ultron; and two, it’s Universal, which really doesn’t have anything to show at Comic-Con this year, sans a big budget sequel (more about that later). So we would expect to see some promotion for this film during the weekend at the very least.
5. The Expendables 3
Release Date: August 15, 2014
And here’s yet another film for which a star spilled the beans about an appearance. Terry Crews mentioned earlier this year during an interview that he would be back at Comic-Con for The Expendables 3. Now, he didn’t say what kind of appearance that would be, so we can only look to the franchise’s past to see what might be in store for attendees this year. The Expendables surprised a Hall H audience way back in 2010, and again with the sequel (although to diminishing returns) in 2012. With this being advertised as the last film in the franchise, we would expect nothing less than for Stallone and the gang to go out with a bang (pun intended).
There’s also the Ford factor here – Harrison Ford, that is, starring in the film along with newcomers Mel Gibson, Wesley Snipes and Antonio Banderas. If Ford is advertised to appear in support of The Expendables 3, there’s going to be a lot of people expecting him to also make an appearance for Star Wars Episode VII as well. Or maybe he doesn’t show up here at all, in order to keep any Star Wars appearance a surprise.
6. Frank Miller’s Sin City: A Dame to Kill For
Release Date: August 22, 2014
Robert Rodriguez. Frank Miller. The sequel to the immensely popular Sin City.
Of course it will be at Comic-Con. The only question is, in what way? Will it be a huge Hall H presentation with a star-studded list of panel guests? Or will it be an intimate panel with fan-favorite director Rodriguez and some big offsite push? Regardless, this is tailor-made for Comic-Con fans, so expect something really cool for attendees.
7. The Maze Runner
Release Date: September 19, 2014
As far back as last fall, star Dylan O’Brien mentioned during an interview that the film would be making some sort of an appearance at Comic-Con. The release date makes sense for it to be in San Diego this July, and the endless misses of YA adaptations with the exception of Twilight and The Hunger Games keep studios pouring money into marketing and advertising in hopes of breaking that streak. The Maze Runner was already at WonderCon though, so we don’t know if it makes sense for it to be part of 20th Century Fox’s annual panel presentation, or maybe this just makes a booth appearance with a signing. We’ll know more for sure in the next few weeks.
8. The Hunger Games: Mockingjay Part 1
Release Date: November 21, 2014
The Hunger Games film franchise has always been at Comic-Con, but 2013 was the first year it actually had its own panel in Hall H. Heading into its final act, it would be a given to make a return appearance if it weren’t for the fact that it doesn’t even need to – the film will be a blockbuster success regardless. And since it’s only “Part 1” of a two-part finale filming back-to-back, perhaps it will pull a Hobbit and forgo rolling out the red carpet with cast and crew this year due to the rigorous production schedule. It would be a shame, but not a surprise. We do know that SDCC plays a key part in Lionsgate’s marketing campaign, so at the very least there should be a big booth push.
9. and 10. Home and The Penguins of Madagascar
Release Dates: Home – March 27, 2015; The Penguins of Madagascar – November 26, 2014
DreamWorks Animation has in recent years had an understated presence at Comic-Con, despite star-studded casts filling up its popular franchises. But that was before 20th Century Fox became its distribution partner. Could we expect to see more animation films play a part in Fox’s annual Hall H presentation? After all, it worked for The LEGO Movie last year. And this year’s slate of Fox’s animation films has some pretty significant star power with the Comic-Con audience.
First, there’s Home, a film not slated for release until spring of 2015 but stars The Big Bang Theory’s Jim Parsons as a member of a friendly alien race who hides out on Earth with a teenage girl, played by Jennifer Lopez. Aliens and Jim Parsons? That seems like a good candidate for Comic-Con. But release date, and the history of animation films at SDCC, put this one clearly in the “wishful thinking” category.
Then there’s The Penguins of Madagascar, the film spin-off of the popular DreamWorks Animation franchise. However the film stars Hobbit and Sherlock star Benedict Cumberbatch, along with John Malkovich, Ken Jeong, and Andy Richter. Plus, the Penguins skew a little older and any panel with that line-up would certainly keep the Hall H audience entertained as they wait for the latest Hollywood blockbuster news and previews. And since the movie releases this year, we think this one is a little more likely to have a presence at Comic-Con.
11. The Hobbit: The Battle of The Five Armies
Release Date: December 17, 2014
Jackson regretfully cancelled any Hobbit appearance at Warner Bros.’ big Hall H presentation last year due to conflicts with the production schedule. But this is the last Hobbit film, and likely the last Tolkien film adaptation in some time, if not ever by Jackson himself. We fully expect Warner Bros. to roll out the red carpet for Jackson and his cast and let them take one final bow in front of thousands of gracious fans on Comic-Con’s grandest stage.
12., 13. 14. and 15. Big Hero 6, Into The Woods, Cinderella, Tomorrowland
Release Dates: Big Hero 6 – November 6, 2014; Into The Woods – December 25, 2014; Cinderella – March 13, 2015; Tomorrowland – May 22, 2015
It’s been two years since Disney proper has made its studio presence known at Comic-Con. Last year’s absence is something we call the “D23 Effect”. Every two years, Disney holds its own fan convention, and famously holds back from showing off its big upcoming properties at other conventions so that D23 could have the exclusive reveal. But 2014 is an off-year for D23, so we fully expect Disney to be back with a Hall H presentation in July. Two years ago, Disney showed off Wreck It Ralph, Oz: The Great and Powerful, Frankenweenie and The Lone Ranger. This year’s slate includes possible panel inclusions such as the film adaptation of Broadway’s Into the Woods; the live-action adaptation of Cinderella, and Tomorrowland, backed by George Clooney, Brad Bird and Damon Lindelof. But if there’s one sure-thing, it’s the first Disney Animation production of a Marvel license, Big Hero 6, which we’re hearing is a shoo-in for July.
16. The Man From U.N.C.L.E.
Release Date: January 16, 2015
As Warner Bros. prides itself on parading an extensive slate of upcoming productions during its annual presentation, we think on paper there’s a good chance we’ll be seeing The Man From U.N.C.L.E., the big-screen adaptation of the 1960s television show. After all, it stars Superman himself, Henry Cavill, so if he’s in San Diego for anything else, he might also be asked to come out on stage for a quick Q&A after a teaser trailer to drum up buzz for the Guy Ritchie-helmed feature.
17. Seventh Son
Release Date: February 6, 2015
Seventh Son, starring Kit Harington and Jeff Bridges, was shown last year during Legendary Entertainment’s presentation with then-distributing partner Warner Bros. Since then, the film was delayed nearly a year, and then moved to new distributor Universal Studios with Legendary’s new deal. If Universal or Legendary is to have its own Hall H presentation this year, we expect this to be back on the slate for 2014. After all, we haven’t heard much from this production for nearly ten months, so getting this back in front of the Comic-Con mindshare will be good.
18. Warcraft
Release Date: March 11, 2016
It’s crazy to think that a movie with a spring 2016 release date would be a lock for Comic-Con, but Legendary Entertainment’s CEO Thomas Tull has already teased the film will be getting some attention at SDCC this year. It has recently wrapped principal photography, and Tull stated “he wouldn’t be surprised” to see something this year, with chances high it will be a new trailer.
19. The Avengers: Age of Ultron
Release Date: May 1, 2015
The best odds for an appearance at Comic-Con this year have to be on The Avengers: Age of Ultron. After all, Marvel didn’t hold back last year even though it was a D23 year. And with Batman vs. Superman: Dawn of Justice moving out to 2016, the stage is clearly Marvel’s to own. Aside from New York Comic-Con, there really isn’t a better opportunity to get Ultron in front of fans before next spring’s release. Even franchise-newcomer Aaron Taylor-Johnson, who is playing Quicksilver in the film, has publicly stated that he will appear at SDCC this year. If The Avengers doesn’t show, prepare for a riot in Hall H.
20. Mad Max: Fury Road
Release Date: May 15, 2015
Mad Max: Fury Road has been a difficult production. Starring Tom Hardy, Charlize Theron and Nicholas Hoult and directed by returning franchise director George Miller, the film completed principal photography in 2013, with reshoots continuing into 2014. It’s a big $100 million production, so we think Warner Bros. will want to create some early positive buzz with a welcoming Hall H audience and the world’s media in attendance.
21. Jurassic World
Release Date: June 12, 2015
Jurassic World has a lot going for it to make an appearance at Comic-Con this year. For one, it’s the latest in a storied action franchise started by Steven Spielberg. Although the sequels have taken their lumps throughout the years, the franchise still remains popular among the SDCC crowd. Two, it’s been shooting since April, so there should be plenty of footage to put together into a teaser trailer. Three, it’s got Judy Greer (Tomorrowland) and Chris Pratt (Guardians of the Galaxy) among its cast, putting two of its stars already in San Diego for their other respective films. And four, its 2015 release date is before the next San Diego Comic-Con, making this the one and only chance to cause a packed Hall H to erupt with excitement. Purely speculation, but the points above make it a good chance for a Jurassic World appearance.
22. Entourage
Release Date: June 12, 2015
At first glance, the idea of Vince and his Hollywood posse making an appearance at Comic-Con seems a little odd. After all, it’s not even a genre film. But the history of Entourage as an HBO series has deep roots in SDCC. Way back in the show’s season two, the cast made a trip to San Diego to promote the fictional Aquaman film, directed by James Cameron, as well as Johnny Drama’s dive into fandom with the outpouring of love for his Viking Quest series. Some credit this episode, which aired back in 2005, for Comic-Con’s sudden increase in popularity among the general public. It would only then make sense for studio Warner Bros. to repay the Comic-Con audience with an early look at the film and some access to the cast.
23 and 24. The Fantastic Four and X-Men: Apocalypse
Release Dates: The Fantastic Four – June 19, 2015; X-Men: Apocalypse – May 27, 2016
20th Century Fox was the first major studio to release a big Marvel franchise movie (X-Men). But in the years that followed, it’s had its share of misses with X-Men: The Last Stand and the two Fantastic Four movies. It’s gotten back on track as of late, with the superb The Wolverine and X-Men: Days of Future Past movies, and the studio is looking to take on Marvel and Sony by furthering its own Marvel Cinematic Universe with a Fantastic Four reboot in 2015. The film has already started shooting, so it should have something to finally show attendees in Hall H after years of rumors, speculation and false starts.
And might we also hear casting news on X-Men: Apocalypse? It’s too early to tell, as shooting won’t start until spring of 2015, but who will play Apocalypse, the big bad himself? And might we also hear more about the announced X-Force and Deadpool movie adaptations? Let’s hope so.
25. Terminator: Genesis
Release Date: July 1, 2015
Doctor Who‘s Matt Smith in a Terminator movie? Yes please. And we speak for the 6000+ fans who will undoubtedly fill Hall H to get a glimpse of the 11th Doctor fighting with, or perhaps alongside, Arnold Schwarzenegger. It has already started filming, and its release date is just prior to next year’s Comic-Con, making this a likely candidate for a 2014 appearance.
26. Ant-Man
Release Date: July 17, 2015
The recent news of Ant-Man’s director Edgar Wright leaving the production over “creative differences” has thrown the production into turmoil, at least in the minds of fans. After all, Wright has been acting as champion of the Marvel character’s big screen debut – and in front of a Comic-Con audience, mind you – as far back as 2006. But despite the serious change in personnel, and given the script still seems to be up in the air, Marvel has not yet changed the 2015 release date. That seems to be a pretty aggressive schedule, even if Wright had stayed aboard.
But now things have changed, and what better way to turn around the production in the eyes of fans than to announced Wright’s replacement, show off some promising teaser footage and show that things are back on track. Or, Marvel could stay away from any controversy altogether and hold any public announcements for now. After all, if Ant-Man‘s release date doesn’t change, Marvel still has one more shot to get in front of a Comic-Con audience in 2015. And quite frankly, a panel full of Avengers: Age of Ultron and Guardians of the Galaxy might just be enough for most.
27. Star Wars Episode VII
Release Date: December 18, 2015
Okay, this is the big one. The panel that most attendees have been hoping for. It’s been nearly ten years since a new Star Wars movie has graced the hallowed Hall H, but this one is more important. Disney, acting as the Star Wars Social Services, has taken the franchise from creator George Lucas and hopes to erase the bad taste of the prequels and endless special editions. Things haven’t started out as well as we had hoped though, playing with tradition by moving the release date to December 2015, re-writing the screenplay which was originally drafted by Oscar-winner Michael Arndt, and calling into question the decision to name J. J. Abrams director, given the fallout from Star Trek Into Darkness. Yet fans, including ourselves, remain hopeful.
And early buzz seems to indicate we might just get that hope (a “new” hope, mind you) with some sort of Star Wars Episode VII presence at Comic-Con this year. Perhaps it’s the rumored title reveal, something that has had a bit of tradition at Comic-Con from Episode III. You can see this reveal from 2004 below:
The film is currently in production, and we think there’s a good chance Abrams would use some of that to make a special teaser for Comic-Con attendees at the tail end of any Disney studio presentation. Or perhaps Disney and Lucasfilm would drop additional character reveals, since according to an article on Slashfilm, “several roles have yet to be filled, including a major female part.” And we think if the cast – including Hamill, Ford and Fisher – made an Avengers-type appearance on stage in Hall H…well, there probably wouldn’t be anything left of Hall H anymore.
What’s missing?
CCI has never been big on bringing horror movies inside the Convention Center – that genre is more likely covered by the folks at NYCC in October. So no Poltergeist, Amityville, Lazarus, Friday the 13th, or Insidious Chapter 3. Sorry, horror fans.
Bonus Round
Comic-Con weekend has always been host to one strangely fitting indie film per year. Last year it was The To-Do List with Aubrey Plaza and Clark Gregg (Agents of S.H.I.E.L.D.); the year before that was Hit and Run, directed by Dax Shepard and staring his wife, Kristen Bell (Veronica Mars). So we’re going to stick our neck out and guess this year’s movie.
Without further ado, our guess for “Weird Indie Movie at Comic-Con” is…
What If
Release Date: August 1, 2014
What If stars Harry Potter himself, Daniel Radcliffe, in a love story between his character and “an animator who lives with her longtime boyfriend Ben”. A lead character who is an animator? That’s a loosely tied connection to Comic-Con if we ever saw one. And it stars Radcliffe, who will be in the U.S. for his Broadway stint in The Cripple of Inishmaan, due to end on July 20 (and right before Comic-Con, see?). These indie appearances tend to be off-site, but an appearance by Radcliffe would undoubtedly create attention among Comic-Con fans, not to mention the media buzz that would follow ahead of its August release date.
What films are you looking forward to most at this year’s Comic-Con? Which films do you think we missed? Which guesses do you think we got wrong? Let us know in the comments. |
April 5, 2015; Sacramento, CA, USA; Utah Jazz forward Gordon Hayward (20) looks on during the first quarter against the Sacramento Kings at Sleep Train Arena. Mandatory Credit: Kyle Terada-USA TODAY Sports
I need to make a confession–I’m a hardcore gamer. As much as I love the Utah Jazz and NBA basketball, I derive an equal amount of pleasure from dungeon raids, leveling up and GLaDOS condescending to me as I attempt to solve a puzzle. However, my enthusiasm for the medium isn’t shared by everyone.
Despite the growing prominence of video games in the media and in society at large, Fox Sports Radio host Colin Cowherd continues to rail against gamers and the rise of competitive eSports. In response to news that Turner Broadcasting is getting into the eSports game, Cowherd had this to say–
Spoken like a true 80s teen movie villain. Obviously, this didn’t sit well with the gaming crowd. One notable gamer who took umbrage with Cowherd’s statements was Gordon Hayward. The Jazz star plays video games himself, has done so competitively, and famously declared that he would PWN LeBron James in League of Legends last year.
In response to Cowherd’s diatribe, Hayward tweeted the following–
Hold up…@ColinCowherd is making fun of gamers for being dorks? People in glass houses… pic.twitter.com/L5UjhErkyT — Gordon Hayward (@gordonhayward) September 30, 2015
Umm, burned?
Hayward undoubtedly wins the day for his defense of gamer-kind. Being the misinformed bully that he is, Cowherd may get off on tearing us down and calling us dorks, but the fact remains that gamers come in all shapes and sizes and from varying walks of life. We are men, women, children, Hollywood actors and even star athletes.
Not all gamers are dorks with Star Wars collections, although I could personally thrill you with my diorama of Han and Chewie squaring off against several Imperial Stormtroopers.
My advice to Cowherd would be to tread lightly when broaching this subject in the future. Not only is he vastly outnumbered, but some gamers look like this–
#Guns. |
///////////////////////////////////////////////////////////////////////////////
// Interface ITcPostCyclic: Cyclic execution (post output update)
///////////////////////////////////////////////////////////////////////////////
HRESULT CWaveSimulatorNew::PostCyclicUpdate(ITcTask* ipTask, ITcUnknown* ipCaller, ULONG_PTR context)
{
HRESULT hr = S_OK;
TCMATSIM_FPUCTRL_INIT
TcTry{
if (SUCCEEDED(hr))
hr = VerifyCaller(ipTask, context);
if (SUCCEEDED(hr))
hr = GetLastMdlErrorStatus();
if (SUCCEEDED(hr) && m_CycleUpdateExecuted){
m_ContextInfoArr[context].ExecutingPostCyclicCall = true;
switch(m_ExecutionSequence){
case ExecutionSequence_IoAtTaskBegin:
MdlUpdate(m_ContextInfoArr[context].SimulinkTid);
MdlOutputs(m_ContextInfoArr[context].SimulinkTid);
break;
case ExecutionSequence_StateUpdateAfterOutputMapping:
MdlUpdate(m_ContextInfoArr[context].SimulinkTid);
break;
case ExecutionSequence_StateUpdateBeforeOutputUpdate:
break;
}
}
if (SUCCEEDED(hr))
hr = EvalMdlErrorStatus(rtmGetErrorStatus((&m_SimStruct)));
}
TcFinally{
TCMATSIM_FPUCTRL_RESTORE
m_ContextInfoArr[context].ExecutingPostCyclicCall = false;
}
return hr;
}
// Copyright 2019 ETH Zurich, Anapaya Systems
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package parser
import (
"fmt"
"strings"
"github.com/google/gopacket"
"github.com/scionproto/scion/go/border/braccept/shared"
"github.com/scionproto/scion/go/lib/common"
"github.com/scionproto/scion/go/lib/ctrl/path_mgmt"
"github.com/scionproto/scion/go/lib/infra"
)
var _ TaggedLayer = (*IFStateReqTaggedLayer)(nil)
type IFStateReqTaggedLayer struct {
gopacket.Payload
path_mgmt.IFStateReq
tagged
options
}
// IFStateReqParser parses an Interface State Request with the following syntax:
//
// IFStateReq: IfID=121
//
func IFStateReqParser(lines []string) TaggedLayer {
// default IFStateReq layer values
i := &IFStateReqTaggedLayer{}
i.Update(lines)
return i
}
func (p *IFStateReqTaggedLayer) Layer() gopacket.Layer {
return &p.Payload
}
func (i *IFStateReqTaggedLayer) Clone() TaggedLayer {
clone := *i
return &clone
}
func (i *IFStateReqTaggedLayer) Update(lines []string) {
if i == nil {
panic(fmt.Errorf("IFStateReq Tagged Layer is nil!\n"))
}
// IFStateReq is always a single line
if len(lines) != 1 {
panic(fmt.Errorf("Bad IFStateReq layer!\n%s\n", lines))
}
_, tag, kvStr := decodeLayerLine(lines[0])
i.tag = tag
kvs := getKeyValueMap(kvStr)
i.updateFields(kvs)
pmpld, err := path_mgmt.NewPld(&i.IFStateReq, nil)
if err != nil {
panic(fmt.Sprintf("Failed to generate PathMgmt payload: %s\n%s\n",
err, strings.Join(lines, "\n")))
}
blob, err := shared.CtrlCapnpEnc(infra.NullSigner, pmpld)
if err != nil {
panic(fmt.Sprintf("Failed to Sign IFStateReq: %s\n%s\n", err, strings.Join(lines, "\n")))
}
i.Payload = make([]byte, len(blob))
copy(i.Payload, blob)
}
func (i *IFStateReqTaggedLayer) updateFields(kvs propMap) {
for k, v := range kvs {
switch k {
case "IfID":
i.IfID = common.IFIDType(StrToInt(v))
default:
panic(fmt.Errorf("Unknown IFStateReq field: %s", k))
}
}
}
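// Usage sketch (not part of the original file): driving the parser with the
// single-line syntax documented above. The input string is illustrative only.
//
//	req := IFStateReqParser([]string{"IFStateReq: IfID=121"})
//	_ = req.Layer() // *gopacket.Payload holding the signed ctrl blob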
var _ TaggedLayer = (*IFStateInfoTaggedLayer)(nil)
type IFStateInfoTaggedLayer struct {
gopacket.Payload
path_mgmt.IFStateInfo
RevInfo
tagged
options
}
// IFStateInfoParser parses an Interface State Info with the following syntax:
//
// IFStateInfo: IfID=121 Active=true
//
// IFStateInfo: IfID=121 Active=false
// SignedRevInfo: IfID=121 IA=1-ff00:0:1 Link=peer TS=now TTL=60
//
// The SignedRevInfo is optional.
//
func IFStateInfoParser(lines []string) TaggedLayer {
// default IFStateInfo layer values
i := &IFStateInfoTaggedLayer{}
i.Update(lines)
return i
}
func (p *IFStateInfoTaggedLayer) Layer() gopacket.Layer {
return &p.Payload
}
func (i *IFStateInfoTaggedLayer) Clone() TaggedLayer {
clone := *i
return &clone
}
func (i *IFStateInfoTaggedLayer) Update(lines []string) {
if i == nil {
panic(fmt.Errorf("IFStateInfo Tagged Layer is nil!\n"))
}
// IFStateInfo is either single line, or two lines with second being the revocation
if len(lines) < 1 || len(lines) > 2 {
panic(fmt.Errorf("Bad IFStateInfo layer!\n%s\n", strings.Join(lines, "\n")))
}
line := lines[0]
_, tag, kvStr := decodeLayerLine(line)
i.tag = tag
kvs := getKeyValueMap(kvStr)
i.updateFields(kvs)
if len(lines) == 2 {
layerType, _, kvStr := decodeLayerLine(lines[1])
if layerType != "SignedRevInfo" {
panic(fmt.Errorf("Bad SignedRevInfo layer!\n%s\n", lines[1]))
}
kvs := getKeyValueMap(kvStr)
i.RevInfo.updateFields(kvs)
i.SRevInfo = i.RevInfo.sign()
} else {
// If we are updating and the layer is not present, remove it.
i.SRevInfo = nil
}
infos := &path_mgmt.IFStateInfos{Infos: []*path_mgmt.IFStateInfo{&i.IFStateInfo}}
pmpld, err := path_mgmt.NewPld(infos, nil)
if err != nil {
panic(fmt.Sprintf("Failed to generate PathMgmt payload: %s\n%s\n",
err, strings.Join(lines, "\n")))
}
blob, err := shared.CtrlCapnpEnc(infra.NullSigner, pmpld)
if err != nil {
panic(fmt.Sprintf("Failed to Sign IFStateInfo: %s\n%s\n", err, strings.Join(lines, "\n")))
}
i.Payload = make([]byte, len(blob))
copy(i.Payload, blob)
}
func (i *IFStateInfoTaggedLayer) updateFields(kvs propMap) {
for k, v := range kvs {
switch k {
case "IfID":
i.IFStateInfo.IfID = common.IFIDType(StrToInt(v))
case "Active":
i.IFStateInfo.Active = StrToBool(v)
default:
panic(fmt.Errorf("Unknown IFStateInfo field: %s", k))
}
}
}
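// Usage sketch (not part of the original file): the two-line form documented
// above, with the optional SignedRevInfo line. All field values are illustrative.
//
//	info := IFStateInfoParser([]string{
//		"IFStateInfo: IfID=121 Active=false",
//		"SignedRevInfo: IfID=121 IA=1-ff00:0:1 Link=peer TS=now TTL=60",
//	})
//	_ = info.Layer() // *gopacket.Payload with the capnp-encoded IFStateInfos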
|
// FileServerHandler implements the actual fileserver logic. zipfs can be set to nil if no pre-zipped files have been prepared.
func FileServerHandler(fs fs.FS, zipfs fs.FS, fallbackFilepath string, config *Config) http.Handler {
fallbackFetcher := fallbackFetch(fallbackFilepath,
fetchWrapperZipOrNot(config.GzipFileExtensions(),
fsFetch("zipped", zipfs, true, nil),
fsFetch("unzipped", fs, false, nil)))
fetcher := fetchWrapperZipOrNot(config.GzipFileExtensions(),
fsFetch("zipped", zipfs, true, fallbackFetcher),
fsFetch("unzipped", fs, false, fallbackFetcher))
handler := &fileserverHandler{
fetch: fetcher,
mediaTypeMap: config.MediaTypeMap,
}
return handler
} |
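// Usage sketch (not part of the original source): one way the handler above
// might be mounted in a plain net/http server, assuming this code lives in the
// same package and that "os" and "net/http" are imported. The Config literal,
// directory names and port are assumptions for illustration; zipfs may be nil
// when no pre-zipped files exist.
func exampleServe() error {
	cfg := &Config{} // assumed: zero-value config with sensible defaults
	handler := FileServerHandler(os.DirFS("./public"), os.DirFS("./public-gz"), "index.html", cfg)
	return http.ListenAndServe(":8080", handler)
}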
/**
* Network utility class.
*/
public abstract class NetworkUtils {
/**
* Decides if low-resolution ("sample") images should be fetched by default instead of full-size images, based on:
* - Screen density
* - Network link speed and quality
* - Is the network metered? ($$$ per MB)
*
* @return true if low-resolution images should be used.
*/
public static boolean shouldFetchImageSamples(Context context) {
// Note that the low-resolution images aren't actually that bad unless the user zooms in on them.
// They're meant for cases where the original image is much larger than an average desktop browser window.
// Check screen resolution.
if (context.getResources().getDisplayMetrics().density <= 1.0) {
return true;
}
// Get system connectivity manager service.
ConnectivityManager cm = (ConnectivityManager) context.getSystemService(Context.CONNECTIVITY_SERVICE);
// Check if network is metered.
if (isActiveNetworkMetered(cm)) {
return true;
}
// Check link quality.
NetworkInfo networkInfo = cm.getActiveNetworkInfo();
if (networkInfo == null) {
    // No active connection at all; fall back to the low-resolution samples.
    return true;
}
return !isConnectionFast(networkInfo.getType(), networkInfo.getSubtype());
}
/**
* Check if active connection is metered. (API 16+)
*
* @param cm Instance of {@link android.net.ConnectivityManager}
* @return true if user pays for bandwidth.
*/
@TargetApi(Build.VERSION_CODES.JELLY_BEAN)
private static boolean isActiveNetworkMetered(ConnectivityManager cm) {
return Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN && cm.isActiveNetworkMetered();
}
/**
* Check if given connection type is fast enough to download high res images.
*
* @param type Connection type constant, as specified in {@link android.net.ConnectivityManager}.
* @param subType Connection subtype constant, as specified in {@link android.telephony.TelephonyManager}.
* @return true if high res images should be downloaded by default.
*/
private static boolean isConnectionFast(int type, int subType) {
if (type == ConnectivityManager.TYPE_WIFI || type == ConnectivityManager.TYPE_WIMAX) {
// WiFi is good.
return true;
} else if (type == ConnectivityManager.TYPE_MOBILE) {
// Exclude mobile network types with avg speeds below or close to ~1Mbps.
// Giving new technologies introduced after this code was written benefit of the doubt.
switch (subType) {
case TelephonyManager.NETWORK_TYPE_1xRTT: // ~ 50-100 kbps
case TelephonyManager.NETWORK_TYPE_CDMA: // ~ 14-64 kbps
case TelephonyManager.NETWORK_TYPE_EDGE: // ~ 50-100 kbps
case TelephonyManager.NETWORK_TYPE_EVDO_0: // ~ 400-1000 kbps
case TelephonyManager.NETWORK_TYPE_EVDO_A: // ~ 600-1400 kbps
case TelephonyManager.NETWORK_TYPE_GPRS: // ~ 100 kbps
case TelephonyManager.NETWORK_TYPE_HSPA: // ~ 700-1700 kbps
case TelephonyManager.NETWORK_TYPE_IDEN: // ~ 25 kbps
case TelephonyManager.NETWORK_TYPE_EHRPD: // ~ 1-2 Mbps
return false;
default:
return true;
}
} else {
return false;
}
}
} |
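// Usage sketch (not part of the original class): picking an image URL based on
// the check above. The sampleUrl/fullUrl parameters are hypothetical.
class ImageUrlChooser {
    /** Returns the URL that should be loaded for the given image. */
    static String chooseImageUrl(Context context, String sampleUrl, String fullUrl) {
        // Prefer the low-resolution sample on slow, metered or low-density setups.
        return NetworkUtils.shouldFetchImageSamples(context) ? sampleUrl : fullUrl;
    }
}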
<gh_stars>1-10
from AthenaPython import PyAthena
from AthenaCommon.Logging import logging
from AthenaCommon.AlgSequence import AlgSequence
# NOTE: the configurable base class is assumed to live in this package's
# generated configuration module; adjust the import path to the real package.
from A4Dumper.A4DumperConf import A4Dumper__A4DumperAlg

topSequence = AlgSequence()

class A4DumperAlg( A4Dumper__A4DumperAlg ):
    def __init__( self,
                  name,
                  seq = topSequence,
                  **kwargs ):
        super( A4DumperAlg, self ).__init__( name, **kwargs )
        self.__logger = logging.getLogger( "A4DumperAlg" )
|
<filename>kamu-core/src/infra/verification_service_impl.rs
// Copyright <NAME>, Inc. and contributors. All rights reserved.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0.
use crate::domain::*;
use crate::infra::*;
use opendatafabric::*;
use dill::*;
use std::sync::Arc;
use tracing::info_span;
pub struct VerificationServiceImpl {
metadata_repo: Arc<dyn MetadataRepository>,
transform_service: Arc<dyn TransformService>,
volume_layout: Arc<VolumeLayout>,
}
#[component(pub)]
impl VerificationServiceImpl {
pub fn new(
metadata_repo: Arc<dyn MetadataRepository>,
transform_service: Arc<dyn TransformService>,
volume_layout: Arc<VolumeLayout>,
) -> Self {
Self {
metadata_repo,
transform_service,
volume_layout,
}
}
fn get_integrity_check_plan(
&self,
dataset_handle: &DatasetHandle,
block_range: (Option<Multihash>, Option<Multihash>),
) -> Result<Vec<(Multihash, MetadataBlock)>, VerificationError> {
let metadata_chain = self
.metadata_repo
.get_metadata_chain(&dataset_handle.as_local_ref())?;
let start_block = block_range.0;
let end_block = block_range
.1
.unwrap_or_else(|| metadata_chain.read_ref(&BlockRef::Head).unwrap());
let plan: Vec<_> = metadata_chain
.iter_blocks_starting(&end_block)
.ok_or(VerificationError::NoSuchBlock(end_block))?
.filter(|(_, block)| block.output_slice.is_some())
.take_while(|(hash, _)| Some(hash.clone()) != start_block)
.collect();
if let Some(start_block) = start_block {
if start_block != plan[plan.len() - 1].0 {
return Err(VerificationError::NoSuchBlock(start_block));
}
}
Ok(plan)
}
fn check_data_integrity(
&self,
dataset_handle: &DatasetHandle,
dataset_kind: DatasetKind,
block_range: (Option<Multihash>, Option<Multihash>),
listener: Arc<dyn VerificationListener>,
) -> Result<VerificationResult, VerificationError> {
let span = info_span!("Verifying data integrity");
let _span_guard = span.enter();
let dataset_layout = DatasetLayout::new(&self.volume_layout, &dataset_handle.name);
let plan = self.get_integrity_check_plan(dataset_handle, block_range)?;
let num_blocks = plan.len();
listener.begin_phase(VerificationPhase::DataIntegrity, num_blocks);
for (block_index, (block_hash, block)) in plan.into_iter().enumerate() {
let output_slice = block.output_slice.as_ref().unwrap();
listener.begin_block(
&block_hash,
block_index,
num_blocks,
VerificationPhase::DataIntegrity,
);
let data_path = dataset_layout.data_dir.join(block_hash.to_string());
// Do a fast pass using physical hash
let physical_hash_actual =
crate::infra::utils::data_utils::get_parquet_physical_hash(&data_path)
.map_err(|e| DomainError::InfraError(e.into()))?;
if physical_hash_actual != output_slice.data_physical_hash {
// Root data files are non-reproducible by definition, so
// if physical hashes don't match - we can give up right away.
if dataset_kind == DatasetKind::Root {
return Err(VerificationError::DataDoesNotMatchMetadata(
DataDoesNotMatchMetadata {
block_hash,
logical_hash: None,
physical_hash: Some(HashMismatch {
expected: output_slice.data_physical_hash.clone(),
actual: physical_hash_actual,
}),
},
));
} else {
// Derivative data may be replayed and produce different binary file
// but data must have same logical hash to be valid.
let logical_hash_actual =
crate::infra::utils::data_utils::get_parquet_logical_hash(&data_path)
.map_err(|e| DomainError::InfraError(e.into()))?;
if logical_hash_actual != output_slice.data_logical_hash {
return Err(VerificationError::DataDoesNotMatchMetadata(
DataDoesNotMatchMetadata {
block_hash,
logical_hash: Some(HashMismatch {
expected: output_slice.data_logical_hash.clone(),
actual: logical_hash_actual,
}),
physical_hash: None,
},
));
}
}
}
listener.end_block(
&block_hash,
block_index,
num_blocks,
VerificationPhase::DataIntegrity,
);
}
listener.end_phase(VerificationPhase::DataIntegrity, num_blocks);
Ok(VerificationResult::Valid)
}
}
impl VerificationService for VerificationServiceImpl {
fn verify(
&self,
dataset_ref: &DatasetRefLocal,
block_range: (Option<Multihash>, Option<Multihash>),
options: VerificationOptions,
maybe_listener: Option<Arc<dyn VerificationListener>>,
) -> Result<VerificationResult, VerificationError> {
let dataset_handle = self.metadata_repo.resolve_dataset_ref(dataset_ref)?;
let span = info_span!("Verifying dataset", %dataset_handle, ?block_range);
let _span_guard = span.enter();
let dataset_kind = self
.metadata_repo
.get_summary(&dataset_handle.as_local_ref())?
.kind;
let listener = maybe_listener.unwrap_or(Arc::new(NullVerificationListener {}));
listener.begin();
let res = try {
if options.check_integrity {
self.check_data_integrity(
&dataset_handle,
dataset_kind,
block_range.clone(),
listener.clone(),
)?;
}
if dataset_kind == DatasetKind::Derivative && options.replay_transformations {
self.transform_service.verify_transform(
&dataset_handle.as_local_ref(),
block_range.clone(),
options,
Some(listener.clone()),
)?;
}
VerificationResult::Valid
};
match &res {
Ok(result) => listener.success(result),
Err(error) => listener.error(error),
}
res
}
fn verify_multi(
&self,
_requests: &mut dyn Iterator<Item = VerificationRequest>,
_options: VerificationOptions,
_listener: Option<Arc<dyn VerificationMultiListener>>,
) -> Result<VerificationResult, VerificationError> {
unimplemented!()
}
}
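// Usage sketch (not part of the original file): assuming the three dependencies
// above are already constructed (e.g. via the dill catalog) and that `options`
// is a VerificationOptions with the integrity check and transformation replay
// enabled, a full-history verification of a dataset could look like:
//
//     let svc = VerificationServiceImpl::new(metadata_repo, transform_service, volume_layout);
//     let result = svc.verify(&dataset_handle.as_local_ref(), (None, None), options, None)?;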
|
“We’re not sure of who we are yet and we’re still trying to figure it out, so this was a really big point for us on the road.”
Pittsburgh Riverhounds (4-3-3, 15 points) vs. St. Louis FC (3-3-3, 12 points)
Saturday, June 6, 2015
1 p.m. ET, Highmark Stadium

In the first-ever USL match in St. Louis on April 12 at World Wide Technology Park, the Riverhounds battled back for a 1-1 draw, thanks to a second-half goal by Vini Dantas and some pretty big saves by Ryan Thompson. At the time, Riverhounds head coach Mark Steffens came away pleased to get a point, as it was his team’s first real test playing on the road in hostile territory.
In a game where the Riverhounds were really struggling to keep possession, Steffens brought Stephan Okai off the bench, and that seemed to help the team keep the ball for longer stretches. It was one of the better games played by Okai as well as Dantas, who was causing all kinds of problems for the St. Louis back line — as he was fouled four separate times in the second half.
It was also the start of a season-long trend in which the Riverhounds would battle back and outplay their opponents in the second half.
A Closer Look: St. Louis F.C.
Actually, this St. Louis team has emulated the Riverhounds in the record column.
They’ve been consistently inconsistent.
After nine USL matches, they carry the same record Pittsburgh did last week: soccer’s version of three-of-a-kind, at 3-3-3. They’ve also moved into the fourth round of the Lamar Hunt U.S. Open Cup with two wins, including a nice result against a tough NASL opponent, Minnesota United.
But that’s about where the similarities end.
While the Riverhounds have scored 25 goals in league play, St. Louis has relied on a strong defense and played nothing but close games, scoring a total of 11 goals in nine matches. Their last seven USL matches have all ended either in a draw or with a one-goal margin.
In their most recent outings in league play, they have faced their Kings Cup rivals, Louisville City FC, in consecutive games played all-square – with 1-1 and 3-3 ties. The 3-3 tie, while not quite as wild as the Keystone Derby matches, still showed that St. Louis can have dangerous goal scoring capability. They scored three goals against a Louisville team that hadn’t given up a goal in a string of matches prior to that. But, like their tie with the Riverhounds, they couldn’t hold the lead each time, and settled for the tie.
Bryan Gaul has led the St. Louis attack with three goals and an assist, having played in all nine games.
What’s At Stake:
The Pittsburgh Riverhounds will aim to solidify their spot in the USL’s playoff zone. The Hounds moved up to fifth in the Eastern Conference standings last week with their incredible 6-5 win over the Harrisburg City Islanders in the Keystone Derby. St. Louis can pull level with the Hounds this weekend if Dale Schilly’s expansion team picks up the win.
This is the second and final meeting between the teams in this year’s USL regular season.
“Saint Louis is a very dangerous attacking team,”
said Riverhounds head coach Mark Steffens.
“They’ve got two big strikers that hold the ball well. They pressed us well in the first game too, and we couldn’t get a rhythm.
“We expect the same again.”
Last week’s drama against Harrisburg saw the Hounds regain the USL’s best offensive record with 25 goals in 10 games. Rob Vincent leads the league with eight goals and also tops the points, points-per-game and goals-per-game categories.
Vincent was named as the USL’s Player of the Week after following up his Lamar Hunt U.S. Open Cup game winner against the Tampa Bay Rowdies with two goals against the Islanders. Hounds captain Danny Earls joined the English midfielder on the league’s Team of the Week.
Kevin Kerr sits second on the assists chart behind Raul Mendiola of L.A. Galaxy II. Kerr has set up five of the Riverhounds’ league goals while scoring five of his own to rank third overall in points behind Vincent and the Sacramento Republic’s Rodrigo Lopez.
South African playmaker Lebo Moloto became the first player to register three assists in a USL game this year during last week’s win over Harrisburg.
But for all of the accolades, Steffens and his coaching staff still must find a way to shore up the Riverhounds’ defensive woes. They have given up the most goals in the league (20), trailed for most of the night against Harrisburg, and in their three losses have been undone by defensive lapses and mistakes.
Injury Report
Steffens has a near full roster to choose from this weekend. Goalkeeper Ryan Thompson is unavailable after leaving Thursday to join the Jamaican national team in Chile for this month’s Copa America tournament.
Matt Dallman is the only other expected absentee as he recovers from a tweaked hip flexor.
Other Match Data:
Referee: David Mon
Assistant Referees: Democratis Pashalidis, Greg Irwin
Fourth Official: Lucas Feathers
/*
* Copyright (c) 2016 Inria and University Pierre and <NAME>
* All rights reserved.
*/
#include <cstdlib>
#include <cstdio>
#include <iostream>
#include <mm_malloc.h>
#ifdef EXBLAS_MPI
#include <mpi.h>
#endif
// exblas
#include "blas1.hpp"
#include "common.hpp"
#ifdef EXBLAS_VS_MPFR
#include <cstddef>
#include <mpfr.h>
double ExSUMVsMPFR(int N, double *a) {
mpfr_t mpaccum;
mpfr_init2(mpaccum, 2098);
mpfr_set_zero(mpaccum, 0);
for(int i = 0; i != N; ++i) {
mpfr_add_d(mpaccum, mpaccum, a[i], MPFR_RNDN);
}
double dacc = mpfr_get_d(mpaccum, MPFR_RNDN);
//mpfr_printf("%Ra\n", mpaccum);
mpfr_clear(mpaccum);
return dacc;
}
#endif
int main(int argc, char * argv[]) {
double eps = 1e-16;
int N = 1 << 20;
bool lognormal = false;
if(argc > 1) {
N = 1 << atoi(argv[1]);
}
if(argc > 4) {
if(argv[4][0] == 'n') {
lognormal = true;
}
}
int range = 1;
int emax = 0;
double mean = 1., stddev = 1.;
if(lognormal) {
stddev = strtod(argv[2], 0);
mean = strtod(argv[3], 0);
}
else {
if(argc > 2) {
range = atoi(argv[2]);
}
if(argc > 3) {
emax = atoi(argv[3]);
}
}
double *a;
#ifdef EXBLAS_MPI
int np = 1, p;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &p);
MPI_Comm_size(MPI_COMM_WORLD, &np);
if (p == 0) {
#endif
a = (double*)_mm_malloc(N*sizeof(double), 32);
if (!a) {
    fprintf(stderr, "Cannot allocate memory for the main array\n");
    exit(EXIT_FAILURE);
}
if(lognormal) {
init_lognormal(N, a, mean, stddev);
} else if ((argc > 4) && (argv[4][0] == 'i')) {
init_ill_cond(N, a, range);
} else {
if(range == 1){
init_naive(N, a);
} else {
init_fpuniform(N, a, range, emax);
}
}
fprintf(stderr, "%d ", N);
if(lognormal) {
fprintf(stderr, "%f ", stddev);
} else {
fprintf(stderr, "%d ", range);
}
#ifdef EXBLAS_MPI
}
#endif
bool is_pass = true;
double exsum_acc, exsum_fpe2, exsum_fpe4, exsum_fpe4ee, exsum_fpe6ee, exsum_fpe8ee;
exsum_acc = exsum(N, a, 1, 0, 0);
exsum_fpe2 = exsum(N, a, 1, 0, 2);
exsum_fpe4 = exsum(N, a, 1, 0, 4);
exsum_fpe4ee = exsum(N, a, 1, 0, 4, true);
exsum_fpe6ee = exsum(N, a, 1, 0, 6, true);
exsum_fpe8ee = exsum(N, a, 1, 0, 8, true);
#ifdef EXBLAS_MPI
if (p == 0) {
#endif
printf(" exsum with superacc = %.16g\n", exsum_acc);
printf(" exsum with FPE2 and superacc = %.16g\n", exsum_fpe2);
printf(" exsum with FPE4 and superacc = %.16g\n", exsum_fpe4);
printf(" exsum with FPE4 early-exit and superacc = %.16g\n", exsum_fpe4ee);
printf(" exsum with FPE6 early-exit and superacc = %.16g\n", exsum_fpe6ee);
printf(" exsum with FPE8 early-exit and superacc = %.16g\n", exsum_fpe8ee);
#ifdef EXBLAS_VS_MPFR
double exsumMPFR = ExSUMVsMPFR(N, a);
printf(" exsum with MPFR = %.16g\n", exsumMPFR);
exsum_acc = fabs(exsumMPFR - exsum_acc) / fabs(exsumMPFR);
exsum_fpe2 = fabs(exsumMPFR - exsum_fpe2) / fabs(exsumMPFR);
exsum_fpe4 = fabs(exsumMPFR - exsum_fpe4) / fabs(exsumMPFR);
exsum_fpe4ee = fabs(exsumMPFR - exsum_fpe4ee) / fabs(exsumMPFR);
exsum_fpe6ee = fabs(exsumMPFR - exsum_fpe6ee) / fabs(exsumMPFR);
exsum_fpe8ee = fabs(exsumMPFR - exsum_fpe8ee) / fabs(exsumMPFR);
if ((exsum_fpe2 > eps) || (exsum_fpe4 > eps) || (exsum_fpe4ee > eps) || (exsum_fpe6ee > eps) || (exsum_fpe8ee > eps)) {
is_pass = false;
printf("FAILED: %.16g \t %.16g \t %.16g \t %.16g \t %.16g\n", exsum_fpe2, exsum_fpe4, exsum_fpe4ee, exsum_fpe6ee, exsum_fpe8ee);
}
#else
exsum_fpe2 = fabs(exsum_acc - exsum_fpe2) / fabs(exsum_acc);
exsum_fpe4 = fabs(exsum_acc - exsum_fpe4) / fabs(exsum_acc);
exsum_fpe4ee = fabs(exsum_acc - exsum_fpe4ee) / fabs(exsum_acc);
exsum_fpe6ee = fabs(exsum_acc - exsum_fpe6ee) / fabs(exsum_acc);
exsum_fpe8ee = fabs(exsum_acc - exsum_fpe8ee) / fabs(exsum_acc);
if ((exsum_fpe2 > eps) || (exsum_fpe4 > eps) || (exsum_fpe4ee > eps) || (exsum_fpe6ee > eps) || (exsum_fpe8ee > eps)) {
is_pass = false;
printf("FAILED: %.16g \t %.16g \t %.16g \t %.16g \t %.16g\n", exsum_fpe2, exsum_fpe4, exsum_fpe4ee, exsum_fpe6ee, exsum_fpe8ee);
}
#endif
fprintf(stderr, "\n");
if (is_pass)
printf("TestPassed; ALL OK!\n");
else
printf("TestFailed!\n");
#ifdef EXBLAS_MPI
}
MPI_Finalize();
#endif
return 0;
}
|
import '@logseq/libs'
import * as AnkiConnect from './anki-connect/AnkiConnect';
import { LazyAnkiNoteManager } from './anki-connect/LazyAnkiNoteManager';
import { template_front, template_back, template_files } from './templates/AnkiCardTemplates';
import { Note } from './notes/Note';
import { ClozeNote } from './notes/ClozeNote';
import { MultilineCardNote } from './notes/MultilineCardNote';
import _ from 'lodash';
import { get_better_error_msg, confirm } from './utils';
import path from 'path';
export class LogseqToAnkiSync {
static isSyncing: boolean;
graphName: string;
modelName: string;
public async sync(): Promise<void> {
if (LogseqToAnkiSync.isSyncing) { console.log(`Syncing already in process...`); return; }
LogseqToAnkiSync.isSyncing = true;
try {
await this.performSync();
}
catch (e) {
logseq.App.showMsg(get_better_error_msg(e.toString()), 'warning');
console.error(e);
}
LogseqToAnkiSync.isSyncing = false;
}
private async performSync(): Promise<void> {
this.graphName = _.get(await logseq.App.getCurrentGraph(), 'name') || 'Default';
this.modelName = `${this.graphName}Model`.replace(/\s/g, "_");
logseq.App.showMsg(`Starting Logseq to Anki Sync for graph ${this.graphName}`);
console.log(`%cStarting Logseq to Anki Sync for graph ${this.graphName}`, 'color: green; font-size: 1.5em;');
// -- Request Access --
await AnkiConnect.requestPermission();
// -- Create models if it doesn't exists --
await AnkiConnect.createModel(this.modelName, ["uuid-type", "uuid", "Text", "Extra", "Breadcrumb", "Config"], template_front, template_back, template_files);
// -- Prepare Anki Note Manager --
let ankiNoteManager = new LazyAnkiNoteManager(this.modelName);
await ankiNoteManager.init();
Note.setAnkiNoteManager(ankiNoteManager);
// -- Get the notes that are to be synced from logseq --
let notes : Array<Note> = [...(await ClozeNote.getNotesFromLogseqBlocks()), ...(await MultilineCardNote.getNotesFromLogseqBlocks())];
for (let note of notes) { // Force persistence of the note's logseq block uuid across re-index by adding an id property to the block in logseq
if (!note.properties["id"]) { try { logseq.Editor.upsertBlockProperty(note.uuid, "id", note.uuid); } catch (e) { console.error(e); } }
}
console.log("Notes:", notes);
// -- Declare some variables to keep track of different operations performed --
let failedCreated: Set<string> = new Set(), failedUpdated: Set<string> = new Set(), failedDeleted: Set<string> = new Set();
let toCreateNotes = new Array<Note>(), toUpdateNotes = new Array<Note>(), toDeleteNotes = new Array<number>();
for (let note of notes) {
let ankiId = await note.getAnkiId();
if (ankiId == null || isNaN(ankiId)) toCreateNotes.push(note);
else toUpdateNotes.push(note);
}
let noteAnkiIds: Array<number> = await Promise.all(notes.map(async block => await block.getAnkiId())); // Flatten current logseq block's anki ids
let AnkiIds: Array<number> = [...ankiNoteManager.noteInfoMap.keys()];
for(let ankiId of AnkiIds) {
if(!noteAnkiIds.includes(ankiId)) {
toDeleteNotes.push(ankiId);
}
}
// -- Prompt the user what actions are going to be performed --
let confirm_msg = `<b>The logseq to anki sync plugin will attempt to perform the following actions:</b><br/>Create ${toCreateNotes.length} new anki notes<br/>Update ${toUpdateNotes.length} existing anki notes<br/>Delete ${toDeleteNotes.length} anki notes<br/><br/>Are you sure you want to continue?`;
if (!(await confirm(confirm_msg))) { console.log("Sync Aborted by user!"); return; }
// -- Sync --
let start_time = performance.now();
await this.createNotes(toCreateNotes, failedCreated, ankiNoteManager);
await this.updateNotes(toUpdateNotes, failedUpdated, ankiNoteManager);
await this.deleteNotes(toDeleteNotes, ankiNoteManager, failedDeleted);
await AnkiConnect.invoke("reloadCollection", {});
// -- Show Result / Summary --
let summary = `Sync Completed! Created Blocks: ${toCreateNotes.length - failedCreated.size} \n Updated Blocks: ${toUpdateNotes.length - failedUpdated.size} \n Deleted Blocks: ${toDeleteNotes.length - failedDeleted.size}`;
let status = 'success';
if (failedCreated.size > 0) summary += `\nFailed Created: ${failedCreated.size} `;
if (failedUpdated.size > 0) summary += `\nFailed Updated: ${failedUpdated.size} `;
if (failedDeleted.size > 0) summary += `\nFailed Deleted: ${failedDeleted.size} `;
if (failedCreated.size > 0 || failedUpdated.size > 0 || failedDeleted.size > 0) status = 'warning';
logseq.App.showMsg(summary, status);
console.log(summary);
if (failedCreated.size > 0) console.log("\nFailed Created:", failedCreated);
if (failedUpdated.size > 0) console.log("\nFailed Updated:", failedUpdated);
if (failedDeleted.size > 0) console.log("\nFailed Deleted:", failedDeleted);
console.log("syncLogseqToAnki() Time Taken:", (performance.now() - start_time).toFixed(2), "ms");
}
private async createNotes(toCreateNotes: Note[], failedCreated: Set<any>, ankiNoteManager: LazyAnkiNoteManager): Promise<void> {
for (let note of toCreateNotes) {
try {
let [html, assets, deck, breadcrumb, tags, extra] = await this.parseNote(note);
// Add assets
const graphPath = (await logseq.App.getCurrentGraph()).path;
assets.forEach(asset => {
ankiNoteManager.storeAsset(encodeURIComponent(asset), path.join(graphPath, path.resolve(asset)))
});
// Create note
ankiNoteManager.addNote(deck, this.modelName, { "uuid-type": `${note.uuid}-${note.type}`, "uuid": note.uuid, "Text": html, "Extra": extra, "Breadcrumb": breadcrumb }, tags);
} catch (e) {
console.error(e); failedCreated.add(`${note.uuid}-${note.type}`);
}
}
let [addedNoteAnkiIdUUIDPairs, subOperationResults] = await ankiNoteManager.execute("addNotes");
for (let addedNoteAnkiIdUUIDPair of addedNoteAnkiIdUUIDPairs) { // update ankiId of added blocks
let uuidtype = addedNoteAnkiIdUUIDPair["uuid-type"];
let uuid = uuidtype.split("-").slice(0, -1).join("-");
let type = uuidtype.split("-").slice(-1)[0];
let note = _.find(toCreateNotes, { "uuid": uuid, "type": type });
note["ankiId"] = addedNoteAnkiIdUUIDPair["ankiId"];
console.log(note);
}
for (let subOperationResult of subOperationResults) {
if (subOperationResult != null && subOperationResult.error != null) {
console.error(subOperationResult.error);
failedCreated.add(subOperationResult["uuid-type"]);
}
}
ankiNoteManager.execute("storeAssets").then((subOperationResults) => {
for (let subOperationResult of subOperationResults) {
if (subOperationResult != null && subOperationResult.error != null) {
console.error(subOperationResult.error);
}
}
});
}
private async updateNotes(toUpdateNotes: Note[], failedUpdated: Set<any>, ankiNoteManager: LazyAnkiNoteManager): Promise<void> {
for (let note of toUpdateNotes) {
try {
let [html, assets, deck, breadcrumb, tags, extra] = await this.parseNote(note);
// Add assets
const graphPath = (await logseq.App.getCurrentGraph()).path;
assets.forEach(asset => {
ankiNoteManager.storeAsset(encodeURIComponent(asset), path.join(graphPath, path.resolve(asset)))
});
// Update note
let ankiId = await note.getAnkiId();
ankiNoteManager.updateNote(ankiId, deck, this.modelName, { "uuid-type": `${note.uuid}-${note.type}`, "uuid": note.uuid, "Text": html, "Extra": extra, "Breadcrumb": breadcrumb }, tags);
} catch (e) {
console.error(e); failedUpdated.add(`${note.uuid}-${note.type}`);
}
}
let subOperationResults = await ankiNoteManager.execute("updateNotes");
for (let subOperationResult of subOperationResults) {
if (subOperationResult != null && subOperationResult.error != null) {
console.error(subOperationResult.error);
failedUpdated.add(subOperationResult["uuid-type"]);
}
}
ankiNoteManager.execute("storeAssets").then((subOperationResults) => {
for (let subOperationResult of subOperationResults) {
if (subOperationResult != null && subOperationResult.error != null) {
console.error(subOperationResult.error);
}
}
});
}
private async deleteNotes(toDeleteNotes: number[], ankiNoteManager: LazyAnkiNoteManager, failedDeleted) {
for(let ankiId of toDeleteNotes){
ankiNoteManager.deleteNote(ankiId);
}
let subOperationResults = await ankiNoteManager.execute("deleteNotes");
for (let subOperationResult of subOperationResults) {
if (subOperationResult != null && subOperationResult.error != null) {
console.error(subOperationResult.error);
failedDeleted.add(subOperationResult.error.ankiId);
}
}
}
private async parseNote(note: Note): Promise<[string, Set<string>, string, string, string[], string]> {
let {html, assets} = await note.addClozes().convertToHtmlFile();
// Parse deck using logic described at https://github.com/debanjandhar12/logseq-anki-sync/wiki/How-to-set-or-change-the-deck-for-cards%3F
let deck: any = _.get(note, 'properties.deck') || _.get(note, 'page.properties.deck') || "Default";
try {
let parentID = note.uuid;
let parent;
while ((parent = await logseq.App.getBlock(parentID)) != null) {
if(_.get(parent, 'properties.deck') != null){
deck = _.get(parent, 'properties.deck');
break;
}
parentID = parent.parent.id;
}
} catch (e) {
console.error(e);
}
if (typeof deck != "string") deck = deck[0];
deck = deck.replace(/\//g, "::");
if(deck == "Default" && _.get(note, 'page.properties.title') != null && _.get(note, 'page.properties.title').includes("/")) deck = _.get(note, 'page.properties.title').split("/").slice(0, -1).join("::");
// Parse breadcrumb
let breadcrumb = `<a href="logseq://graph/${encodeURIComponent(this.graphName)}?page=${encodeURIComponent(note.page.originalName)}" title="${note.page.originalName}">${note.page.originalName}</a>`;
if(logseq.settings.breadcrumbDisplay == "Show Page name and parent blocks context") {
try {
let parentBlocks = [];
let parentID = note.parent;
let parent;
while ((parent = await logseq.App.getBlock(parentID)) != null) {
parentBlocks.push({content:parent.content.replaceAll(/^\s*(\w|-)*::.*\n?\n?/gm, ""), uuid:parent.uuid});
parentID = parent.parent.id;
}
while(parentBlocks.length > 0) {
let parentBlock = parentBlocks.pop();
let parentBlockContentFirstLine = parentBlock.content.split("\n")[0];
breadcrumb += ` > <a href="logseq://graph/${encodeURIComponent(this.graphName)}?block-id=${encodeURIComponent(parentBlock.uuid)}" title="${parentBlock.content}">${parentBlockContentFirstLine}</a>`;
}
} catch (e) {
console.error(e);
}
}
let tags = [...(_.get(note, 'properties.tags') || []), ...(_.get(note, 'page.properties.tags') || [])];
let extra = _.get(note, 'properties.extra') || _.get(note, 'page.properties.extra') || "";
if (Array.isArray(extra)) extra = extra.join(" ");
return [html, assets, deck, breadcrumb, tags, extra];
}
} |
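// Usage sketch (not part of the original file): the class exposes a single
// public entry point, so a caller (e.g. a plugin command or toolbar handler)
// would simply do:
//
//   await new LogseqToAnkiSync().sync();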