text
null
inputs
dict
prediction
null
prediction_agent
null
annotation
list
annotation_agent
null
multi_label
bool
1 class
explanation
null
id
stringlengths
1
5
metadata
null
status
stringclasses
2 values
event_timestamp
null
metrics
null
null
{ "abstract": " We characterize photonic transport in a boundary driven array of nonlinear\noptical cavities. We find that the output field suddenly drops when the chain\nlength is increased beyond a threshold. After this threshold a highly chaotic\nand unstable regime emerges, which marks the onset of a super-diffusive\nphotonic transport. We show the scaling of the threshold with pump intensity\nand nonlinearity. Finally, we address the competition of disorder and\nnonlinearity presenting a diffusive-insulator phase transition.\n", "title": "Nonequilibrium photonic transport and phase transition in an array of optical cavities" }
null
null
null
null
true
null
2401
null
Default
null
null
null
{ "abstract": " We show empirically that the optimal strategy of parameter averaging in a\nminmax convex-concave game setting is also strikingly effective in the non\nconvex-concave GAN setting, specifically alleviating the convergence issues\nassociated with cycling behavior observed in GANs. We show that averaging over\ngenerator parameters outside of the trainig loop consistently improves\ninception and FID scores on different architectures and for different GAN\nobjectives. We provide comprehensive experimental results across a range of\ndatasets, bilinear games, mixture of Gaussians, CIFAR-10, STL-10, CelebA and\nImageNet, to demonstrate its effectiveness. We achieve state-of-the-art results\non CIFAR-10 and produce clean CelebA face images, demonstrating that averaging\nis one of the most effective techniques for training highly performant GANs.\n", "title": "The Unusual Effectiveness of Averaging in GAN Training" }
null
null
null
null
true
null
2402
null
Default
null
null
null
{ "abstract": " We compare the long-term fractional frequency variation of four hydrogen\nmasers that are part of an ensemble of clocks comprising the National Institute\nof Standards and Technology,(NIST), Boulder, timescale with the fractional\nfrequencies of primary frequency standards operated by leading metrology\nlaboratories in the United States, France, Germany, Italy and the United\nKingdom for a period extending more than 14 years. The measure of the assumed\nvariation of non-gravitational interaction,(LPI parameter, $\\beta$)---within\nthe atoms of H and Cs---over time as the earth orbits the sun, has been\nconstrained to $\\beta=(2.2 \\pm 2.5)\\times 10^{-7}$, a factor of two improvement\nover previous estimates. Using our results together with the previous best\nestimates of $\\beta$ based on Rb vs. Cs, and Rb vs. H comparisons, we impose\nthe most stringent limits to date on the dimensionless coupling constants that\nrelate the variation of fundamental constants such as the fine-structure\nconstant and the scaled quark mass with strong(QCD) interaction to the\nvariation in the local gravitational potential. For any metric theory of\ngravity $\\beta=0$.\n", "title": "A null test of General Relativity: New limits on Local Position Invariance and the variation of fundamental constants" }
null
null
[ "Physics" ]
null
true
null
2403
null
Validated
null
null
null
{ "abstract": " The bootstrap current and flow velocity of a low-collisionality stellarator\nplasma are calculated. As far as possible, the analysis is carried out in a\nuniform way across all low-collisionality regimes in general stellarator\ngeometry, assuming only that the confinement is good enough that the plasma is\napproximately in local thermodynamic equilibrium. It is found that conventional\nexpressions for the ion flow speed and bootstrap current in the\nlow-collisionality limit are accurate only in the $1/\\nu$-collisionality regime\nand need to be modified in the $\\sqrt{\\nu}$-regime. The correction due to\nfinite collisionality is also discussed and is found to scale as $\\nu^{2/5}$.\n", "title": "Stellarator bootstrap current and plasma flow velocity at low collisionality" }
null
null
null
null
true
null
2404
null
Default
null
null
null
{ "abstract": " We use the Hubble Space Telescope to obtain WFC3/F390W imaging of the\nsupergroup SG1120-1202 at z=0.37, mapping the UV emission of 138\nspectroscopically confirmed members. We measure total (F390W-F814W) colors and\nvisually classify the UV morphology of individual galaxies as \"clumpy\" or\n\"smooth.\" Approximately 30% of the members have pockets of UV emission (clumpy)\nand we identify for the first time in the group environment galaxies with UV\nmorphologies similar to the jellyfish galaxies observed in massive clusters. We\nstack the clumpy UV members and measure a shallow internal color gradient,\nwhich indicates unobscured star formation is occurring throughout these\ngalaxies. We also stack the four galaxy groups and measure a strong trend of\ndecreasing UV emission with decreasing projected group distance ($R_{proj}$).\nWe find that the strong correlation between decreasing UV emission and\nincreasing stellar mass can fully account for the observed trend in\n(F390W-F814W) - $R_{proj}$, i.e., mass-quenching is the dominant mechanism for\nextinguishing UV emission in group galaxies. Our extensive multi-wavelength\nanalysis of SG1120-1202 indicates that stellar mass is the primary predictor of\nUV emission, but that the increasing fraction of massive (red/smooth) galaxies\nat $R_{proj}$ < 2$R_{200}$ and existence of jellyfish candidates is due to the\ngroup environment.\n", "title": "SG1120-1202: Mass-Quenching as Tracked by UV Emission in the Group Environment at z=0.37" }
null
null
[ "Physics" ]
null
true
null
2405
null
Validated
null
null
null
{ "abstract": " Society faces a fundamental global problem of understanding which individuals\nare currently developing strong support for some extremist entity such as ISIS\n(Islamic State) -- even if they never end up doing anything in the real world.\nThe importance of online connectivity in developing intent has been confirmed\nby recent case-studies of already convicted terrorists. Here we identify\ndynamical patterns in the online trajectories that individuals take toward\ndeveloping a high level of extremist support -- specifically, for ISIS. Strong\nmemory effects emerge among individuals whose transition is fastest, and hence\nmay become 'out of the blue' threats in the real world. A generalization of\ndiagrammatic expansion theory helps quantify these characteristics, including\nthe impact of changes in geographical location, and can facilitate prediction\nof future risks. By quantifying the trajectories that individuals follow on\ntheir journey toward expressing high levels of pro-ISIS support -- irrespective\nof whether they then carry out a real-world attack or not -- our findings can\nhelp move safety debates beyond reliance on static watch-list identifiers such\nas ethnic background or immigration status, and/or post-fact interviews with\nalready-convicted individuals. Given the broad commonality of social media\nplatforms, our results likely apply quite generally: for example, even on\nTelegram where (like Twitter) there is no built-in group feature as in our\nstudy, individuals tend to collectively build and pass through so-called\nsuper-group accounts.\n", "title": "Dynamical patterns in individual trajectories toward extremism" }
null
null
null
null
true
null
2406
null
Default
null
null
null
{ "abstract": " This paper establishes convergence rate bounds for a variant of the proximal\nalternating direction method of multipliers (ADMM) for solving nonconvex\nlinearly constrained optimization problems. The variant of the proximal ADMM\nallows the inclusion of an over-relaxation stepsize parameter belonging to the\ninterval $(0,2)$. To the best of our knowledge, all related papers in the\nliterature only consider the case where the over-relaxation parameter lies in\nthe interval $(0,(1+\\sqrt{5})/2)$.\n", "title": "Convergence rate bounds for a proximal ADMM with over-relaxation stepsize parameter for solving nonconvex linearly constrained problems" }
null
null
null
null
true
null
2407
null
Default
null
null
null
{ "abstract": " In this paper, we address the basic problem of recognizing moving objects in\nvideo images using Visual Vocabulary model and Bag of Words and track our\nobject of interest in the subsequent video frames using species inspired PSO.\nInitially, the shadow free images are obtained by background modelling followed\nby foreground modeling to extract the blobs of our object of interest.\nSubsequently, we train a cubic SVM with human body datasets in accordance with\nour domain of interest for recognition and tracking. During training, using the\nprinciple of Bag of Words we extract necessary features of certain domains and\nobjects for classification. Subsequently, matching these feature sets with\nthose of the extracted object blobs that are obtained by subtracting the shadow\nfree background from the foreground, we detect successfully our object of\ninterest from the test domain. The performance of the classification by cubic\nSVM is satisfactorily represented by confusion matrix and ROC curve reflecting\nthe accuracy of each module. After classification, our object of interest is\ntracked in the test domain using species inspired PSO. By combining the\nadaptive learning tools with the efficient classification of description, we\nachieve optimum accuracy in recognition of the moving objects. We evaluate our\nalgorithm benchmark datasets: iLIDS, VIVID, Walking2, Woman. Comparative\nanalysis of our algorithm against the existing state-of-the-art trackers shows\nvery satisfactory and competitive results.\n", "title": "Detection, Recognition and Tracking of Moving Objects from Real-time Video via Visual Vocabulary Model and Species Inspired PSO" }
null
null
[ "Computer Science" ]
null
true
null
2408
null
Validated
null
null
null
{ "abstract": " Klavs F. Jensen is Warren K. Lewis Professor in Chemical Engineering and\nMaterials Science and Engineering at the Massachusetts Institute of Technology.\nHere he describes the use of microfluidics for chemical synthesis, from the\nearly demonstration examples to the current efforts with automated droplet\nmicrofluidic screening and optimization techniques.\n", "title": "Microfluidics for Chemical Synthesis: Flow Chemistry" }
null
null
null
null
true
null
2409
null
Default
null
null
null
{ "abstract": " The complexity and size of state-of-the-art cell models have significantly\nincreased in part due to the requirement that these models possess complex\ncellular functions which are thought--but not necessarily proven--to be\nimportant. Modern cell models often involve hundreds of parameters; the values\nof these parameters come, more often than not, from animal experiments whose\nrelationship to the human physiology is weak with very little information on\nthe errors in these measurements. The concomitant uncertainties in parameter\nvalues result in uncertainties in the model outputs or Quantities of Interest\n(QoIs). Global Sensitivity Analysis (GSA) aims at apportioning to individual\nparameters (or sets of parameters) their relative contribution to output\nuncertainty thereby introducing a measure of influence or importance of said\nparameters. New GSA approaches are required to deal with increased model size\nand complexity; a three stage methodology consisting of screening (dimension\nreduction), surrogate modeling, and computing Sobol' indices, is presented. The\nmethodology is used to analyze a physiologically validated numerical model of\nneurovascular coupling which possess 160 uncertain parameters. The sensitivity\nanalysis investigates three quantities of interest (QoIs), the average value of\n$K^+$ in the extracellular space, the average volumetric flow rate through the\nperfusing vessel, and the minimum value of the actin/myosin complex in the\nsmooth muscle cell. GSA provides a measure of the influence of each parameter,\nfor each of the three QoIs, giving insight into areas of possible physiological\ndysfunction and areas of further investigation.\n", "title": "Global Sensitivity Analysis of High Dimensional Neuroscience Models: An Example of Neurovascular Coupling" }
null
null
null
null
true
null
2410
null
Default
null
null
null
{ "abstract": " We introduce a new method for building models of CH, together with $\\Pi_2$\nstatements over $H(\\omega_2)$, by forcing over a model of CH. Unlike similar\nconstructions in the literature, our construction adds new reals, but only\n$\\aleph_1$-many of them. Using this approach, we prove that a very strong form\nof the negation of Club Guessing at $\\omega_1$ known as Measuring is consistent\ntogether with CH, thereby answering a well-known question of Moore. The\nconstruction works over any model of ZFC + CH and can be described as a finite\nsupport forcing construction with finite systems of countable models with\nmarkers as side conditions and with strong symmetry constraints on both side\nconditions and working parts.\n", "title": "Few new reals" }
null
null
null
null
true
null
2411
null
Default
null
null
null
{ "abstract": " In this paper we propose a Hamiltonian approach to gapped topological phases\non an open surface with boundary. Our setting is an extension of the Levin-Wen\nmodel to a 2d graph on the open surface, whose boundary is part of the graph.\nWe systematically construct a series of boundary Hamiltonians such that each of\nthem, when combined with the usual Levin-Wen bulk Hamiltonian, gives rise to a\ngapped energy spectrum which is topologically protected; and the corresponding\nwave functions are robust under changes of the underlying graph that maintain\nthe spatial topology of the system. We derive explicit ground-state\nwavefunctions of the system and show that the boundary types are classified by\nMorita-equivalent Frobenius algebras. We also construct boundary quasiparticle\ncreation, measuring and hopping operators. These operators allow us to\ncharacterize the boundary quasiparticles by bimodules of Frobenius algebras.\nOur approach also offers a concrete set of tools for computations. We\nillustrate our approach by a few examples.\n", "title": "Boundary Hamiltonian theory for gapped topological phases on an open surface" }
null
null
[ "Physics", "Mathematics" ]
null
true
null
2412
null
Validated
null
null
null
{ "abstract": " This paper considers the non-Hermitian Zakharov-Shabat (ZS) scattering\nproblem which forms the basis for defining the SU$(2)$-nonlinear Fourier\ntransformation (NFT). The theoretical underpinnings of this generalization of\nthe conventional Fourier transformation is quite well established in the\nAblowitz-Kaup-Newell-Segur (AKNS) formalism; however, efficient numerical\nalgorithms that could be employed in practical applications are still\nunavailable.\nIn this paper, we present a unified framework for the forward and inverse NFT\nusing exponential one-step methods which are amenable to FFT-based fast\npolynomial arithmetic. Within this discrete framework, we propose a fast\nDarboux transformation (FDT) algorithm having an operational complexity of\n$\\mathscr{O}\\left(KN+N\\log^2N\\right)$ such that the error in the computed\n$N$-samples of the $K$-soliton vanishes as $\\mathscr{O}\\left(N^{-p}\\right)$\nwhere $p$ is the order of convergence of the underlying one-step method. For\nfixed $N$, this algorithm outperforms the the classical DT (CDT) algorithm\nwhich has a complexity of $\\mathscr{O}\\left(K^2N\\right)$. We further present\nextension of these algorithms to the general version of DT which allows one to\nadd solitons to arbitrary profiles that are admissible as scattering potentials\nin the ZS-problem. The general CDT/FDT algorithms have the same operational\ncomplexity as that of the $K$-soliton case and the order of convergence matches\nthat of the underlying one-step method. A comparative study of these algorithms\nis presented through exhaustive numerical tests.\n", "title": "Fast Inverse Nonlinear Fourier Transformation using Exponential One-Step Methods, Part I: Darboux Transformation" }
null
null
null
null
true
null
2413
null
Default
null
null
null
{ "abstract": " The central goal of this thesis is to develop methods to experimentally study\ntopological phases. We do so by applying the powerful toolbox of quantum\nsimulation techniques with cold atoms in optical lattices. To this day, a\ncomplete classification of topological phases remains elusive. In this context,\nexperimental studies are key, both for studying the interplay between topology\nand complex effects and for identifying new forms of topological order. It is\ntherefore crucial to find complementary means to measure topological properties\nin order to reach a fundamental understanding of topological phases. In one\ndimensional chiral systems, we suggest a new way to construct and identify\ntopologically protected bound states, which are the smoking gun of these\nmaterials. In two dimensional Hofstadter strips (i.e: systems which are very\nshort along one dimension), we suggest a new way to measure the topological\ninvariant directly from the atomic dynamics.\n", "title": "Propagation in media as a probe for topological properties" }
null
null
[ "Physics" ]
null
true
null
2414
null
Validated
null
null
null
{ "abstract": " Given a matrix $\\mathbf{A}\\in\\mathbb{R}^{n\\times d}$ and a vector $b\n\\in\\mathbb{R}^{d}$, we show how to compute an $\\epsilon$-approximate solution\nto the regression problem $ \\min_{x\\in\\mathbb{R}^{d}}\\frac{1}{2} \\|\\mathbf{A} x\n- b\\|_{2}^{2} $ in time $ \\tilde{O} ((n+\\sqrt{d\\cdot\\kappa_{\\text{sum}}})\\cdot\ns\\cdot\\log\\epsilon^{-1}) $ where\n$\\kappa_{\\text{sum}}=\\mathrm{tr}\\left(\\mathbf{A}^{\\top}\\mathbf{A}\\right)/\\lambda_{\\min}(\\mathbf{A}^{T}\\mathbf{A})$\nand $s$ is the maximum number of non-zero entries in a row of $\\mathbf{A}$. Our\nalgorithm improves upon the previous best running time of $ \\tilde{O}\n((n+\\sqrt{n \\cdot\\kappa_{\\text{sum}}})\\cdot s\\cdot\\log\\epsilon^{-1})$.\nWe achieve our result through a careful combination of leverage score\nsampling techniques, proximal point methods, and accelerated coordinate\ndescent. Our method not only matches the performance of previous methods, but\nfurther improves whenever leverage scores of rows are small (up to\npolylogarithmic factors). We also provide a non-linear generalization of these\nresults that improves the running time for solving a broader class of ERM\nproblems.\n", "title": "Leverage Score Sampling for Faster Accelerated Regression and ERM" }
null
null
null
null
true
null
2415
null
Default
null
null
null
{ "abstract": " Autonomous robots increasingly depend on third-party off-the-shelf components\nand complex machine-learning techniques. This trend makes it challenging to\nprovide strong design-time certification of correct operation. To address this\nchallenge, we present SOTER, a programming framework that integrates the core\nprinciples of runtime assurance to enable the use of uncertified controllers,\nwhile still providing safety guarantees.\nRuntime Assurance (RTA) is an approach used for safety-critical systems where\ndesign-time analysis is coupled with run-time techniques to switch between\nunverified advanced controllers and verified simple controllers. In this paper,\nwe present a runtime assurance programming framework for modular design of\nprovably-safe robotics software. \\tool provides language primitives to\ndeclaratively construct a \\rta module consisting of an advanced controller\n(untrusted), a safe controller (trusted), and the desired safety specification\n(S). If the RTA module is well formed then the framework provides a formal\nguarantee that it satisfies property S. The compiler generates code for\nmonitoring system state and switching control between the advanced and safe\ncontroller in order to guarantee S. RTA allows complex systems to be\nconstructed through the composition of RTA modules.\nTo demonstrate the efficacy of our framework, we consider a real-world\ncase-study of building a safe drone surveillance system. Our experiments both\nin simulation and on actual drones show that RTA-enabled RTA ensures safety of\nthe system, including when untrusted third-party components have bugs or\ndeviate from the desired behavior.\n", "title": "SOTER: Programming Safe Robotics System using Runtime Assurance" }
null
null
null
null
true
null
2416
null
Default
null
null
null
{ "abstract": " Silicon photomultipliers (SiPMs) are potential solid-state alternatives to\ntraditional photomultiplier tubes (PMTs) for single-photon detection. In this\npaper, we report on evaluating SensL MicroFC-10035-SMT SiPMs for their\nsuitability as PMT replacements. The devices were successfully operated in a\nliquid-xenon detector, which demonstrates that SiPMs can be used in noble\nelement time projection chambers as photosensors. The devices were also cooled\ndown to 170 K to observe dark count dependence on temperature. No dependencies\non the direction of an applied 3.2 kV/cm electric field were observed with\nrespect to dark-count rate, gain, or photon detection efficiency.\n", "title": "On the Evaluation of Silicon Photomultipliers for Use as Photosensors in Liquid Xenon Detectors" }
null
null
null
null
true
null
2417
null
Default
null
null
null
{ "abstract": " Experiments may not reveal their full import at the time that they are\nperformed. The scientists who perform them usually are testing a specific\nhypothesis and quite often have specific expectations limiting the possible\ninferences that can be drawn from the experiment. Nonetheless, as Hacking has\nsaid, experiments have lives of their own. Those lives do not end with the\ninitial report of the results and consequences of the experiment. Going back\nand rethinking the consequences of the experiment in a new context, theoretical\nor empirical, has great merit as a strategy for investigation and for\nscientific problem analysis. I apply this analysis to the interplay between\nFizeau's classic optical experiments and the building of special relativity.\nEinstein's understanding of the problems facing classical electrodynamics and\noptics, in part, was informed by Fizeau's 1851 experiments. However, between\n1851 and 1905, Fizeau's experiments were duplicated and reinterpreted by a\nsuccession of scientists, including Hertz, Lorentz, and Michelson. Einstein's\nanalysis of the consequences of the experiments is tied closely to this\ntheoretical and experimental tradition. However, Einstein's own inferences from\nthe experiments differ greatly from the inferences drawn by others in that\ntradition.\n", "title": "Reconsidering Experiments" }
null
null
null
null
true
null
2418
null
Default
null
null
null
{ "abstract": " We study the statistical and computational aspects of kernel principal\ncomponent analysis using random Fourier features and show that under mild\nassumptions, $O(\\sqrt{n} \\log n)$ features suffices to achieve\n$O(1/\\epsilon^2)$ sample complexity. Furthermore, we give a memory efficient\nstreaming algorithm based on classical Oja's algorithm that achieves this rate.\n", "title": "Streaming Kernel PCA with $\\tilde{O}(\\sqrt{n})$ Random Features" }
null
null
null
null
true
null
2419
null
Default
null
null
null
{ "abstract": " We consider a population of $n$ agents which communicate with each other in a\ndecentralized manner, through random pairwise interactions. One or more agents\nin the population may act as authoritative sources of information, and the\nobjective of the remaining agents is to obtain information from or about these\nsource agents. We study two basic tasks: broadcasting, in which the agents are\nto learn the bit-state of an authoritative source which is present in the\npopulation, and source detection, in which the agents are required to decide if\nat least one source agent is present in the population or not.We focus on\ndesigning protocols which meet two natural conditions: (1) universality, i.e.,\nindependence of population size, and (2) rapid convergence to a correct global\nstate after a reconfiguration, such as a change in the state of a source agent.\nOur main positive result is to show that both of these constraints can be met.\nFor both the broadcasting problem and the source detection problem, we obtain\nsolutions with a convergence time of $O(\\log^2 n)$ rounds, w.h.p., from any\nstarting configuration. The solution to broadcasting is exact, which means that\nall agents reach the state broadcast by the source, while the solution to\nsource detection admits one-sided error on a $\\varepsilon$-fraction of the\npopulation (which is unavoidable for this problem). Both protocols are easy to\nimplement in practice and have a compact formulation.Our protocols exploit the\nproperties of self-organizing oscillatory dynamics. On the hardness side, our\nmain structural insight is to prove that any protocol which meets the\nconstraints of universality and of rapid convergence after reconfiguration must\ndisplay a form of non-stationary behavior (of which oscillatory dynamics are an\nexample). 
We also observe that the periodicity of the oscillatory behavior of\nthe protocol, when present, must necessarily depend on the number $^\\\\# X$ of\nsource agents present in the population. For instance, our protocols inherently\nrely on the emergence of a signal passing through the population, whose period\nis $\\Theta(\\log \\frac{n}{^\\\\# X})$ rounds for most starting configurations. The\ndesign of clocks with tunable frequency may be of independent interest, notably\nin modeling biological networks.\n", "title": "Universal Protocols for Information Dissemination Using Emergent Signals" }
null
null
[ "Computer Science" ]
null
true
null
2420
null
Validated
null
null
null
{ "abstract": " In this note we show that a mutation theory of species with potential can be\ndefined so that a certain class of skew-symmetrizable integer matrices have a\nspecies realization admitting a non-degenerate potential. This gives a partial\naffirmative answer to a question raised by Jan Geuenich and Daniel\nLabardini-Fragoso. We also provide an example of a class of skew-symmetrizable\n$4 \\times 4$ integer matrices, which are not globally unfoldable nor strongly\nprimitive, and that have a species realization admitting a non-degenerate\npotential.\n", "title": "A note on species realizations and nondegeneracy of potentials" }
null
null
null
null
true
null
2421
null
Default
null
null
null
{ "abstract": " We use the \"generalized hierarchical equation of motion\" proposed in Paper I\nto study decoherence in a system coupled to a spin bath. The present\nmethodology allows a systematic incorporation of higher order anharmonic\neffects of the bath in dynamical calculations. We investigate the leading order\ncorrections to the linear response approximations for spin bath models. Two\ntypes of spin-based environments are considered: (1) a bath of spins\ndiscretized from a continuous spectral density and (2) a bath of physical spins\nsuch as nuclear or electron spins. The main difference resides with how the\nbath frequency and the system-bath coupling parameters are chosen to represent\nan environment. When discretized from a continuous spectral density, the\nsystem-bath coupling typically scales as $\\sim 1/\\sqrt{N_B}$ where $N_B$ is the\nnumber of bath spins. This scaling suppresses the non-Gaussian characteristics\nof the spin bath and justify the linear response approximations in the\nthermodynamic limit. For the physical spin bath models, system-bath couplings\nare directly deduced from spin-spin interactions with no reason to obey the\n$1/\\sqrt{N_B}$ scaling. It is not always possible to justify the linear\nresponse approximations. Furthermore, if the spin-spin Hamiltonian and/or the\nbath parameters are highly symmetrical, these additional constraints generate\nnon-Markovian and persistent dynamics that is beyond the linear response\ntreatments.\n", "title": "A Unified Stochastic Formulation of Dissipative Quantum Dynamics. II. Beyond Linear Response of Spin Baths" }
null
null
null
null
true
null
2422
null
Default
null
null
null
{ "abstract": " We consider the ground-state properties of Rashba spin-orbit-coupled\npseudo-spin-1/2 Bose-Einstein condensates (BECs) in a rotating two-dimensional\n(2D) toroidal trap. In the absence of spin-orbit coupling (SOC), the increasing\nrotation frequency enhances the creation of giant vortices for the initially\nmiscible BECs, while it can lead to the formation of semiring density patterns\nwith irregular hidden vortex structures for the initially immiscible BECs.\nWithout rotation, strong 2D isotropic SOC yields a heliciform-stripe phase for\nthe initially immiscible BECs. Combined effects of rotation, SOC, and\ninteratomic interactions on the vortex structures and typical spin textures of\nthe ground state of the system are discussed systematically. In particular, for\nfixed rotation frequency above the critical value, the increasing isotropic SOC\nfavors a visible vortex ring in each component which is accompanied by a hidden\ngiant vortex plus a (several) hidden vortex ring(s) in the central region. In\nthe case of 1D anisotropic SOC, large SOC strength results in the generation of\nhidden linear vortex string and the transition from initial phase separation\n(phase mixing) to phase mixing (phase separation). Furthermore, the peculiar\nspin textures including skyrmion lattice, skyrmion pair and skyrmion string are\nrevealed in this system.\n", "title": "Vortex states and spin textures of rotating spin-orbit-coupled Bose-Einstein condensates in a toroidal trap" }
null
null
null
null
true
null
2423
null
Default
null
null
null
{ "abstract": " Solving the global method of Weighted Least Squares (WLS) model in image\nfiltering is both time- and memory-consuming. In this paper, we present an\nalternative approximation in a time- and memory- efficient manner which is\ndenoted as Semi-Global Weighed Least Squares (SG-WLS). Instead of solving a\nlarge linear system, we propose to iteratively solve a sequence of subsystems\nwhich are one-dimensional WLS models. Although each subsystem is\none-dimensional, it can take two-dimensional neighborhood information into\naccount due to the proposed special neighborhood construction. We show such a\ndesirable property makes our SG-WLS achieve close performance to the original\ntwo-dimensional WLS model but with much less time and memory cost. While\nprevious related methods mainly focus on the 4-connected/8-connected\nneighborhood system, our SG-WLS can handle a more general and larger\nneighborhood system thanks to the proposed fast solution. We show such a\ngeneralization can achieve better performance than the 4-connected/8-connected\nneighborhood system in some applications. Our SG-WLS is $\\sim20$ times faster\nthan the WLS model. For an image of $M\\times N$, the memory cost of SG-WLS is\nat most at the magnitude of $max\\{\\frac{1}{M}, \\frac{1}{N}\\}$ of that of the\nWLS model. We show the effectiveness and efficiency of our SG-WLS in a range of\napplications.\n", "title": "Semi-Global Weighted Least Squares in Image Filtering" }
null
null
null
null
true
null
2424
null
Default
null
null
null
{ "abstract": " This work builds on earlier results. We define universal elliptic Gau{\\ss}\nsums for Atkin primes in Schoof's algorithm for counting points on elliptic\ncurves. Subsequently, we show these quantities admit an efficiently computable\nrepresentation in terms of the $j$-invariant and two other modular functions.\nWe analyse the necessary computations in detail and derive an alternative\napproach for determining the trace of the Frobenius homomorphism for Atkin\nprimes using these pre-computations. A rough run-time analysis shows, however,\nthat this new method is not competitive with existing ones.\n", "title": "Universal elliptic Gauß sums for Atkin primes in Schoof's algorithm" }
null
null
[ "Mathematics" ]
null
true
null
2425
null
Validated
null
null
null
{ "abstract": " Residual Network (ResNet) is the state-of-the-art architecture that realizes\nsuccessful training of really deep neural network. It is also known that good\nweight initialization of neural network avoids problem of vanishing/exploding\ngradients. In this paper, simplified models of ResNets are analyzed. We argue\nthat goodness of ResNet is correlated with the fact that ResNets are relatively\ninsensitive to choice of initial weights. We also demonstrate how batch\nnormalization improves backpropagation of deep ResNets without tuning initial\nvalues of weights.\n", "title": "Deep Residual Networks and Weight Initialization" }
null
null
null
null
true
null
2426
null
Default
null
null
null
{ "abstract": " A second generation of gravitational wave detectors will soon come online\nwith the objective of measuring for the first time the tiny gravitational\nsignal from the coalescence of black hole and/or neutron star binaries. In this\ncommunication, we propose a new time-frequency search method alternative to\nmatched filtering techniques that are usually employed to detect this signal.\nThis method relies on a graph that encodes the time evolution of the signal and\nits variability by establishing links between coefficients in the multi-scale\ntime-frequency decomposition of the data. We provide a proof of concept for\nthis approach.\n", "title": "Wavelet graphs for the direct detection of gravitational waves" }
null
null
null
null
true
null
2427
null
Default
null
null
null
{ "abstract": " A surprising diversity of different products of hypergraphs have been\ndiscussed in the literature. Most of the hypergraph products can be viewed as\ngeneralizations of one of the four standard graph products. The most widely\nstudied variant, the so-called square product, does not have this property,\nhowever. Here we survey the literature on hypergraph products with an emphasis\non comparing the alternative generalizations of graph products and the\nrelationships among them. In this context the so-called 2-sections and\nL2-sections are considered. These constructions are closely linked to related\ncolored graph structures that seem to be a useful tool for the prime factor\ndecompositions w.r.t.\\ specific hypergraph products. We summarize the current\nknowledge on the propagation of hypergraph invariants under the different\nhypergraph multiplications. While the overwhelming majority of the material\nconcerns finite (undirected) hypergraphs, the survey also covers a summary of\nthe few results on products of infinite and directed hypergraphs.\n", "title": "A Survey on Hypergraph Products (Erratum)" }
null
null
null
null
true
null
2428
null
Default
null
null
null
{ "abstract": " Simple scaling consideration and NRG solution of the one- and two-channel\nKondo model in the presence of a logarithmic Van Hove singularity at the Fermi\nlevel is given. The temperature dependences of local and impurity magnetic\nsusceptibility and impurity entropy are calculated. The low-temperature\nbehavior of the impurity susceptibility and impurity entropy turns out to be\nnon-universal in the Kondo sense and independent of the $s-d$ coupling $J$. The\nresonant level model solution in the strong coupling regime confirms the NRG\nresults. In the two-channel case the local susceptibility demonstrates a\nnon-Fermi-liquid power-law behavior.\n", "title": "One- and two-channel Kondo model with logarithmic Van Hove singularity: a numerical renormalization group solution" }
null
null
null
null
true
null
2429
null
Default
null
null
null
{ "abstract": " This paper proposes a deep Convolutional Neural Network(CNN) with strong\ngeneralization ability for structural topology optimization. The architecture\nof the neural network is made up of encoding and decoding parts, which provide\ndown- and up-sampling operations. In addition, a popular technique, namely\nU-Net, was adopted to improve the performance of the proposed neural network.\nThe input of the neural network is a well-designed tensor with each channel\nincludes different information for the problem, and the output is the layout of\nthe optimal structure. To train the neural network, a large dataset is\ngenerated by a conventional topology optimization approach, i.e. SIMP. The\nperformance of the proposed method was evaluated by comparing its efficiency\nand accuracy with SIMP on a series of typical optimization problems. Results\nshow that a significant reduction in computation cost was achieved with little\nsacrifice on the optimality of design solutions. Furthermore, the proposed\nmethod can intelligently solve problems under boundary conditions not being\nincluded in the training dataset.\n", "title": "A deep Convolutional Neural Network for topology optimization with strong generalization ability" }
null
null
null
null
true
null
2430
null
Default
null
null
null
{ "abstract": " Recurrent Neural Networks (RNNs) with attention mechanisms have obtained\nstate-of-the-art results for many sequence processing tasks. Most of these\nmodels use a simple form of encoder with attention that looks over the entire\nsequence and assigns a weight to each token independently. We present a\nmechanism for focusing RNN encoders for sequence modelling tasks which allows\nthem to attend to key parts of the input as needed. We formulate this using a\nmulti-layer conditional sequence encoder that reads in one token at a time and\nmakes a discrete decision on whether the token is relevant to the context or\nquestion being asked. The discrete gating mechanism takes in the context\nembedding and the current hidden state as inputs and controls information flow\ninto the layer above. We train it using policy gradient methods. We evaluate\nthis method on several types of tasks with different attributes. First, we\nevaluate the method on synthetic tasks which allow us to evaluate the model for\nits generalization ability and probe the behavior of the gates in more\ncontrolled settings. We then evaluate this approach on large scale Question\nAnswering tasks including the challenging MS MARCO and SearchQA tasks. Our\nmodels shows consistent improvements for both tasks over prior work and our\nbaselines. It has also shown to generalize significantly better on synthetic\ntasks as compared to the baselines.\n", "title": "Focused Hierarchical RNNs for Conditional Sequence Processing" }
null
null
null
null
true
null
2431
null
Default
null
null
null
{ "abstract": " The paper describes the Faraday room that shields the CUORE experiment\nagainst electromagnetic fields, from 50 Hz up to high frequency. Practical\ncontraints led to choose panels made of light shielding materials. The seams\nbetween panels were optimized with simulations to minimize leakage.\nMeasurements of shielding performance show attenuation of a factor 15 at 50 Hz,\nand a factor 1000 above 1 KHz up to about 100 MHz.\n", "title": "The Faraday room of the CUORE Experiment" }
null
null
null
null
true
null
2432
null
Default
null
null
null
{ "abstract": " We describe a benchmark study of collective and nonlinear dynamics in an APS\nstorage ring. A 1-mm long bunch was assumed in the calculation of wakefield and\nelement by element particle tracking with distributed wakefield component along\nthe ring was performed in Elegant simulation. The result of Elegant simulation\ndiffered by less than 5 % from experimental measurement\n", "title": "Simulations and measurements of the impact of collective effects on dynamic aperture" }
null
null
null
null
true
null
2433
null
Default
null
null
null
{ "abstract": " Correlated random fields are a common way to model dependence structures in\nhigh-dimensional data, especially for data collected in imaging. One important\nparameter characterizing the degree of dependence is the asymptotic variance\nwhich adds up all autocovariances in the temporal and spatial domain.\nEspecially, it arises in the standardization of test statistics based on\npartial sums of random fields and thus the construction of tests requires its\nestimation. In this paper we propose consistent estimators for this parameter\nfor strictly stationary {\\phi}-mixing random fields with arbitrary dimension of\nthe domain and taking values in a Euclidean space of arbitrary dimension, thus\nallowing for multivariate random fields. We establish consistency, provide\ncentral limit theorems and show that distributional approximations of related test\nstatistics based on sample autocovariances of random fields can be obtained by\nthe subsampling approach. As in applications the spatial-temporal correlations\nare often quite local, such that a large number of autocovariances vanish or\nare negligible, we also investigate a thresholding approach where sample\nautocovariances of small magnitude are omitted. Extensive simulation studies\nshow that the proposed estimators work well in practice and, when used to\nstandardize image test statistics, can provide highly accurate image testing\nprocedures.\n", "title": "Estimation of the asymptotic variance of univariate and multivariate random fields and statistical inference" }
null
null
null
null
true
null
2434
null
Default
null
null
null
{ "abstract": " Deep learning approaches such as convolutional neural nets have consistently\noutperformed previous methods on challenging tasks such as dense, semantic\nsegmentation. However, the various proposed networks perform differently, with\nbehaviour largely influenced by architectural choices and training settings.\nThis paper explores Ensembles of Multiple Models and Architectures (EMMA) for\nrobust performance through aggregation of predictions from a wide range of\nmethods. The approach reduces the influence of the meta-parameters of\nindividual models and the risk of overfitting the configuration to a particular\ndatabase. EMMA can be seen as an unbiased, generic deep learning model which is\nshown to yield excellent performance, winning the first position in the BRATS\n2017 competition among 50+ participating teams.\n", "title": "Ensembles of Multiple Models and Architectures for Robust Brain Tumour Segmentation" }
null
null
null
null
true
null
2435
null
Default
null
null
null
{ "abstract": " A SPaT (Signal Phase and Timing) message describes for each lane the current\nphase at a signalized intersection together with an estimate of the residual\ntime of that phase. Accurate SPaT messages can be used to construct a speed\nprofile for a vehicle that reduces its fuel consumption as it approaches or\nleaves an intersection. This paper presents SPaT estimation algorithms at an\nintersection with a semi-actuated signal, using real-time signal phase\nmeasurements. The algorithms are evaluated using high-resolution data from two\nintersections in Montgomery County, MD. The algorithms can be readily\nimplemented at signal controllers. The study supports three findings. First,\nreal-time information dramatically improves the accuracy of the prediction of\nthe residual time compared with prediction based on historical data alone.\nSecond, as time increases the prediction of the residual time may increase or\ndecrease. Third, as drivers differently weight errors in predicting `end of\ngreen' and `end of red', drivers on two different approaches may prefer\ndifferent estimates of the residual time of the same phase.\n", "title": "Estimating Phase Duration for SPaT Messages" }
null
null
null
null
true
null
2436
null
Default
null
null
null
{ "abstract": " In this paper, we propose a novel method to estimate and characterize spatial\nvariations on dies or wafers. This new technique exploits recent developments\nin matrix completion, enabling estimation of spatial variation across wafers or\ndies with a small number of randomly picked sampling points while still\nachieving fairly high accuracy. This new approach can be easily generalized,\nincluding for estimation of mixed spatial and structure or device type\ninformation.\n", "title": "Efficient Spatial Variation Characterization via Matrix Completion" }
null
null
null
null
true
null
2437
null
Default
null
null
null
{ "abstract": " In this work, a novel approach is presented to solve the problem of tracking\ntrajectories in autonomous vehicles. This approach is based on the use of a\ncascade control where the external loop solves the position control using a\nnovel Takagi Sugeno - Model Predictive Control (TS-MPC) approach and the\ninternal loop is in charge of the dynamic control of the vehicle using a Takagi\nSugeno - Linear Quadratic Regulator technique designed via Linear Matrix\nInequalities (TS-LMI-LQR). Both techniques use a TS representation of the\nkinematic and dynamic models of the vehicle. In addition, a novel Takagi Sugeno\nestimator - Moving Horizon Estimator - Unknown Input Observer (TS-MHE-UIO) is\npresented. This method estimates the dynamic states of the vehicle optimally as\nwell as the force of friction acting on the vehicle that is used to reduce the\ncontrol efforts. The innovative contribution of the TS-MPC and TS-MHE-UIO\ntechniques is that using the TS model formulation of the vehicle allows us to\nsolve the nonlinear problem as if it were linear, reducing computation times by\n40-50 times. To demonstrate the potential of the TS-MPC we propose a comparison\nbetween three methods of solving the kinematic control problem: using the\nnon-linear MPC formulation (NL-MPC), using TS-MPC without updating the\nprediction model and using updated TS-MPC with the references of the planner.\n", "title": "TS-MPC for Autonomous Vehicles including a dynamic TS-MHE-UIO" }
null
null
null
null
true
null
2438
null
Default
null
null
null
{ "abstract": " Flexibility is a key enabler for the smart grid, required to facilitate\nDemand Side Management (DSM) programs, managing electrical consumption to\nreduce peaks, balance renewable generation and provide ancillary services to\nthe grid. Flexibility analysis is required to identify and quantify the\navailable electrical load of a site or building which can be shed or increased\nin response to a DSM signal. A methodology for assessing flexibility is\ndeveloped, based on flexibility formulations and optimization requirements. The\nmethodology characterizes the loads, storage and on-site generation,\nincorporates site assessment using the ISO 50002:2014 energy audit standard and\nbenchmarks performance against documented studies. An example application of\nthe methodology is detailed using a pilot site demonstrator.\n", "title": "Flexibility Analysis for Smart Grid Demand Response" }
null
null
[ "Computer Science", "Mathematics" ]
null
true
null
2439
null
Validated
null
null
null
{ "abstract": " This paper describes the Duluth system that participated in SemEval-2017 Task\n6 #HashtagWars: Learning a Sense of Humor. The system participated in Subtasks\nA and B using N-gram language models, ranking highly in the task evaluation.\nThis paper discusses the results of our system in the development and\nevaluation stages and from two post-evaluation runs.\n", "title": "Duluth at SemEval-2017 Task 6: Language Models in Humor Detection" }
null
null
null
null
true
null
2440
null
Default
null
null
null
{ "abstract": " This work presents an innovative application of the well-known concept of\ncortico-muscular coherence for the classification of various motor tasks, i.e.,\ngrasps of different kinds of objects. Our approach can classify objects with\ndifferent weights (motor-related features) and different surface frictions\n(haptics-related features) with high accuracy (over 0:8). The outcomes\npresented here provide information about the synchronization existing between\nthe brain and the muscles during specific activities; thus, this may represent\na new effective way to perform activity recognition.\n", "title": "Classification of grasping tasks based on EEG-EMG coherence" }
null
null
null
null
true
null
2441
null
Default
null
null
null
{ "abstract": " Stellar evolution models are most uncertain for evolved massive stars.\nAsteroseismology based on high-precision uninterrupted space photometry has\nbecome a new way to test the outcome of stellar evolution theory and was\nrecently applied to a multitude of stars, but not yet to massive evolved\nsupergiants.Our aim is to detect, analyse and interpret the photospheric and\nwind variability of the O9.5Iab star HD 188209 from Kepler space photometry and\nlong-term high-resolution spectroscopy. We used Kepler scattered-light\nphotometry obtained by the nominal mission during 1460d to deduce the\nphotometric variability of this O-type supergiant. In addition, we assembled\nand analysed high-resolution high signal-to-noise spectroscopy taken with four\nspectrographs during some 1800d to interpret the temporal spectroscopic\nvariability of the star. The variability of this blue supergiant derived from\nthe scattered-light space photometry is in full in agreement with the one found\nin the ground-based spectroscopy. We find significant low-frequency variability\nthat is consistently detected in all spectral lines of HD 188209. The\nphotospheric variability propagates into the wind, where it has similar\nfrequencies but slightly higher amplitudes. The morphology of the frequency\nspectra derived from the long-term photometry and spectroscopy points towards a\nspectrum of travelling waves with frequency values in the range expected for an\nevolved O-type star. Convectively-driven internal gravity waves excited in the\nstellar interior offer the most plausible explanation of the detected\nvariability.\n", "title": "Kepler sheds new and unprecedented light on the variability of a blue supergiant: gravity waves in the O9.5Iab star HD 188209" }
null
null
null
null
true
null
2442
null
Default
null
null
null
{ "abstract": " We study complexity of short sentences in Presburger arithmetic (Short-PA).\nHere by \"short\" we mean sentences with a bounded number of variables,\nquantifiers, inequalities and Boolean operations; the input consists only of\nthe integers involved in the inequalities. We prove that assuming Kannan's\npartition can be found in polynomial time, the satisfiability of Short-PA\nsentences can be decided in polynomial time. Furthermore, under the same\nassumption, we show that the numbers of satisfying assignments of short\nPresburger sentences can also be computed in polynomial time.\n", "title": "Complexity of short Presburger arithmetic" }
null
null
null
null
true
null
2443
null
Default
null
null
null
{ "abstract": " Mean square error (MSE) has been the preferred choice as loss function in the\ncurrent deep neural network (DNN) based speech separation techniques. In this\npaper, we propose a new cost function with the aim of optimizing the extended\nshort time objective intelligibility (ESTOI) measure. We focus on applications\nwhere low algorithmic latency ($\\leq 10$ ms) is important. We use long\nshort-term memory networks (LSTM) and evaluate our proposed approach on four\nsets of two-speaker mixtures from extended Danish hearing in noise (HINT)\ndataset. We show that the proposed loss function can offer improved or at par\nobjective intelligibility (in terms of ESTOI) compared to an MSE optimized\nbaseline while resulting in lower objective separation performance (in terms of\nthe source to distortion ratio (SDR)). We then proceed to propose an approach\nwhere the network is first initialized with weights optimized for MSE criterion\nand then trained with the proposed ESTOI loss criterion. This approach\nmitigates some of the losses in objective separation performance while\npreserving the gains in objective intelligibility.\n", "title": "Deep neural network based speech separation optimizing an objective estimator of intelligibility for low latency applications" }
null
null
[ "Computer Science" ]
null
true
null
2444
null
Validated
null
null
null
{ "abstract": " We introduce a new application of measuring symplectic generators to\ncharacterize and control the linear betatron coupling in storage rings. From\nsynchronized and consecutive BPM (Beam Position Monitor) turn-by-turn (TbT)\nreadings, symplectic Lie generators describing the coupled linear dynamics are\nextracted. Four plane-crossing terms in the generators directly characterize\nthe coupling between the horizontal and the vertical planes. Coupling control\ncan be accomplished by utilizing the dependency of these plane-crossing terms\non skew quadrupoles. The method has been successfully demonstrated to reduce\nthe vertical effective emittance down to the diffraction limit in the newly\nconstructed National Synchrotron Light Source II (NSLS-II) storage ring. This\nmethod can be automatized to realize linear coupling feedback control with\nnegligible disturbance on machine operation.\n", "title": "Characterization and control of linear coupling using turn-by-turn beam position monitor data in storage rings" }
null
null
null
null
true
null
2445
null
Default
null
null
null
{ "abstract": " We consider the problem of undirected graphical model inference. In many\napplications, instead of perfectly recovering the unknown graph structure, a\nmore realistic goal is to infer some graph invariants (e.g., the maximum\ndegree, the number of connected subgraphs, the number of isolated nodes). In\nthis paper, we propose a new inferential framework for testing nested multiple\nhypotheses and constructing confidence intervals of the unknown graph\ninvariants under undirected graphical models. Compared to perfect graph\nrecovery, our methods require significantly weaker conditions. This paper makes\ntwo major contributions: (i) Methodologically, for testing nested multiple\nhypotheses, we propose a skip-down algorithm on the whole family of monotone\ngraph invariants (The invariants which are non-decreasing under addition of\nedges). We further show that the same skip-down algorithm also provides valid\nconfidence intervals for the targeted graph invariants. (ii) Theoretically, we\nprove that the length of the obtained confidence intervals are optimal and\nadaptive to the unknown signal strength. We also prove generic lower bounds for\nthe confidence interval length for various invariants. Numerical results on\nboth synthetic simulations and a brain imaging dataset are provided to\nillustrate the usefulness of the proposed method.\n", "title": "Adaptive Inferential Method for Monotone Graph Invariants" }
null
null
null
null
true
null
2446
null
Default
null
null
null
{ "abstract": " High-dose-rate brachytherapy is a tumor treatment method where a highly\nradioactive source is brought in close proximity to the tumor. In this paper we\ndevelop a simulated annealing algorithm to optimize the dwell times at\npreselected dwell positions to maximize tumor coverage under dose-volume\nconstraints on the organs at risk. Compared to existing algorithms, our\nalgorithm has advantages in terms of speed and objective value and does not\nrequire an expensive general purpose solver. Its success mainly depends on\nexploiting the efficiency of matrix multiplication and a careful selection of\nthe neighboring states. In this paper we outline its details and make an\nin-depth comparison with existing methods using real patient data.\n", "title": "High-dose-rate prostate brachytherapy inverse planning on dose-volume criteria by simulated annealing" }
null
null
null
null
true
null
2447
null
Default
null
null
null
{ "abstract": " We use the scattering network as a generic and fixed ini-tialization of the\nfirst layers of a supervised hybrid deep network. We show that early layers do\nnot necessarily need to be learned, providing the best results to-date with\npre-defined representations while being competitive with Deep CNNs. Using a\nshallow cascade of 1 x 1 convolutions, which encodes scattering coefficients\nthat correspond to spatial windows of very small sizes, permits to obtain\nAlexNet accuracy on the imagenet ILSVRC2012. We demonstrate that this local\nencoding explicitly learns invariance w.r.t. rotations. Combining scattering\nnetworks with a modern ResNet, we achieve a single-crop top 5 error of 11.4% on\nimagenet ILSVRC2012, comparable to the Resnet-18 architecture, while utilizing\nonly 10 layers. We also find that hybrid architectures can yield excellent\nperformance in the small sample regime, exceeding their end-to-end\ncounterparts, through their ability to incorporate geometrical priors. We\ndemonstrate this on subsets of the CIFAR-10 dataset and on the STL-10 dataset.\n", "title": "Scaling the Scattering Transform: Deep Hybrid Networks" }
null
null
[ "Computer Science" ]
null
true
null
2448
null
Validated
null
null
null
{ "abstract": " We studied how lagged linear regression can be used to detect the physiologic\neffects of drugs from data in the electronic health record (EHR). We\nsystematically examined the effect of methodological variations ((i) time\nseries construction, (ii) temporal parameterization, (iii) intra-subject\nnormalization, (iv) differencing (lagged rates of change achieved by taking\ndifferences between consecutive measurements), (v) explanatory variables, and\n(vi) regression models) on performance of lagged linear methods in this\ncontext. We generated two gold standards (one knowledge-base derived, one\nexpert-curated) for expected pairwise relationships between 7 drugs and 4 labs,\nand evaluated how the 64 unique combinations of methodological perturbations\nreproduce gold standards. Our 28 cohorts included patients in Columbia\nUniversity Medical Center/NewYork-Presbyterian Hospital clinical database. The\nmost accurate methods achieved AUROC of 0.794 for knowledge-base derived gold\nstandard (95%CI [0.741, 0.847]) and 0.705 for expert-curated gold standard (95%\nCI [0.629, 0.781]). We observed a 0.633 mean AUROC (95%CI [0.610, 0.657],\nexpert-curated gold standard) across all methods that re-parameterize time\naccording to sequence and use either a joint autoregressive model with\ndifferencing or an independent lag model without differencing. The complement\nof this set of methods achieved a mean AUROC close to 0.5, indicating the\nimportance of these choices. We conclude that time-series analysis of EHR data\nwill likely rely on some of the beneficial pre-processing and modeling\nmethodologies identified, and will certainly benefit from continued careful\nanalysis of methodological perturbations. This study found that methodological\nvariations, such as pre-processing and representations, significantly affect\nresults, exposing the importance of evaluating these components when comparing\nmachine-learning methods.\n", "title": "Methodological variations in lagged regression for detecting physiologic drug effects in EHR data" }
null
null
null
null
true
null
2449
null
Default
null
null
null
{ "abstract": " We develop a complexity measure for large-scale economic systems based on\nShannon's concept of entropy. By adopting Leontief's perspective of the\nproduction process as a circular flow, we formulate the process as a Markov\nchain. Then we derive a measure of economic complexity as the average number of\nbits required to encode the flow of goods and services in the production\nprocess. We illustrate this measure using data from seven national economies,\nspanning several decades.\n", "title": "Leontief Meets Shannon - Measuring the Complexity of the Economic System" }
null
null
[ "Physics", "Statistics" ]
null
true
null
2450
null
Validated
null
null
null
{ "abstract": " The net contribution of the strange quark spins to the proton spin, $\\Delta\ns$, can be determined from neutral current elastic neutrino-proton interactions\nat low momentum transfer combined with data from electron-proton scattering.\nThe probability of neutrino-proton interactions depends in part on the axial\nform factor, which represents the spin structure of the proton and can be\nseparated into its quark flavor contributions. Low momentum transfer neutrino\nneutral current interactions can be measured in MicroBooNE, a high-resolution\nliquid argon time projection chamber (LArTPC) in its first year of running in\nthe Booster Neutrino Beamline at Fermilab. The signal for these interactions in\nMicroBooNE is a single short proton track. We present our work on the automated\nreconstruction and classification of proton tracks in LArTPCs, an important\nstep in the determination of neutrino- nucleon cross sections and the\nmeasurement of $\\Delta s$.\n", "title": "Exploring nucleon spin structure through neutrino neutral-current interactions in MicroBooNE" }
null
null
null
null
true
null
2451
null
Default
null
null
null
{ "abstract": " Carmesin, Federici, and Georgakopoulos [arXiv:1603.06712] constructed a\ntransient hyperbolic graph that has no transient subtrees and that has the\nLiouville property for harmonic functions. We modify their construction to get\na unimodular random graph with the same properties.\n", "title": "A unimodular Liouville hyperbolic souvlaki --- an appendix to [arXiv:1603.06712]" }
null
null
[ "Mathematics" ]
null
true
null
2452
null
Validated
null
null
null
{ "abstract": " We consider machine learning in a comparison-based setting where we are given\na set of points in a metric space, but we have no access to the actual\ndistances between the points. Instead, we can only ask an oracle whether the\ndistance between two points $i$ and $j$ is smaller than the distance between\nthe points $i$ and $k$. We are concerned with data structures and algorithms to\nfind nearest neighbors based on such comparisons. We focus on a simple yet\neffective algorithm that recursively splits the space by first selecting two\nrandom pivot points and then assigning all other points to the closer of the\ntwo (comparison tree). We prove that if the metric space satisfies certain\nexpansion conditions, then with high probability the height of the comparison\ntree is logarithmic in the number of points, leading to efficient search\nperformance. We also provide an upper bound for the failure probability to\nreturn the true nearest neighbor. Experiments show that the comparison tree is\ncompetitive with algorithms that have access to the actual distance values, and\nneeds less triplet comparisons than other competitors.\n", "title": "Comparison Based Nearest Neighbor Search" }
null
null
null
null
true
null
2453
null
Default
null
null
null
{ "abstract": " Predicting the completion time of business process instances would be a very\nhelpful aid when managing processes under service level agreement constraints.\nThe ability to know in advance the trend of running process instances would\nallow business managers to react in time, in order to prevent delays or\nundesirable situations. However, making such accurate forecasts is not easy:\nmany factors may influence the required time to complete a process instance. In\nthis paper, we propose an approach based on deep Recurrent Neural Networks\n(specifically LSTMs) that is able to exploit arbitrary information associated\nto single events, in order to produce an as-accurate-as-possible prediction of\nthe completion time of running instances. Experiments on real-world datasets\nconfirm the quality of our proposal.\n", "title": "LSTM Networks for Data-Aware Remaining Time Prediction of Business Process Instances" }
null
null
null
null
true
null
2454
null
Default
null
null
null
{ "abstract": " Quantum annealing (QA) is a generic method for solving optimization problems\nusing fictitious quantum fluctuation. The current device performing QA involves\ncontrolling the transverse field; it is classically simulatable by using the\nstandard technique for mapping the quantum spin systems to the classical ones.\nIn this sense, the current system for QA is not powerful despite utilizing\nquantum fluctuation. Hence, we developed a system with a time-dependent\nHamiltonian consisting of a combination of the formulated Ising model and the\n\"driver\" Hamiltonian with only quantum fluctuation. In the previous study, for\na fully connected spin model, quantum fluctuation can be addressed in a\nrelatively simple way. We proved that the fully connected antiferromagnetic\ninteraction can be transformed into a fluctuating transverse field and is thus\nclassically simulatable at sufficiently low temperatures. Using the fluctuating\ntransverse field, we established several ways to simulate part of the\nnonstoquastic Hamiltonian on classical computers. We formulated a\nmessage-passing algorithm in the present study. This algorithm is capable of\nassessing the performance of QA with part of the nonstoquastic Hamiltonian\nhaving a large number of spins. In other words, we developed a different\napproach for simulating the nonstoquastic Hamiltonian without using the quantum\nMonte Carlo technique. Our results were validated by comparison to the results\nobtained by the replica method.\n", "title": "Message-passing algorithm of quantum annealing with nonstoquastic Hamiltonian" }
null
null
null
null
true
null
2455
null
Default
null
null
null
{ "abstract": " Unwanted variation can be highly problematic and so its detection is often\ncrucial. Relative log expression (RLE) plots are a powerful tool for\nvisualising such variation in high dimensional data. We provide a detailed\nexamination of these plots, with the aid of examples and simulation, explaining\nwhat they are and what they can reveal. RLE plots are particularly useful for\nassessing whether a procedure aimed at removing unwanted variation, i.e. a\nnormalisation procedure, has been successful. These plots, while originally\ndevised for gene expression data from microarrays, can also be used to reveal\nunwanted variation in many other kinds of high dimensional data, where such\nvariation can be problematic.\n", "title": "RLE Plots: Visualising Unwanted Variation in High Dimensional Data" }
null
null
null
null
true
null
2456
null
Default
null
null
null
{ "abstract": " We present ALMA CO (2-1) detections in 11 gas-rich cluster galaxies at z~1.6,\nconstituting the largest sample of molecular gas measurements in z>1.5 clusters\nto date. The observations span three galaxy clusters, derived from the Spitzer\nAdaptation of the Red-sequence Cluster Survey. We augment the >5sigma\ndetections of the CO (2-1) fluxes with multi-band photometry, yielding stellar\nmasses and infrared-derived star formation rates, to place some of the first\nconstraints on molecular gas properties in z~1.6 cluster environments. We\nmeasure sizable gas reservoirs of 0.5-2x10^11 solar masses in these objects,\nwith high gas fractions and long depletion timescales, averaging 62% and 1.4\nGyr, respectively. We compare our cluster galaxies to the scaling relations of\nthe coeval field, in the context of how gas fractions and depletion timescales\nvary with respect to the star-forming main sequence. We find that our cluster\ngalaxies lie systematically off the field scaling relations at z=1.6 toward\nenhanced gas fractions, at a level of ~4sigma, but have consistent depletion\ntimescales. Exploiting CO detections in lower-redshift clusters from the\nliterature, we investigate the evolution of the gas fraction in cluster\ngalaxies, finding it to mimic the strong rise with redshift in the field. We\nemphasize the utility of detecting abundant gas-rich galaxies in high-redshift\nclusters, deeming them as crucial laboratories for future statistical studies.\n", "title": "ALMA Observations of Gas-Rich Galaxies in z~1.6 Galaxy Clusters: Evidence for Higher Gas Fractions in High-Density Environments" }
null
null
[ "Physics" ]
null
true
null
2457
null
Validated
null
null
null
{ "abstract": " Anatomical and biophysical modeling of left atrium (LA) and proximal\npulmonary veins (PPVs) is important for clinical management of several cardiac\ndiseases. Magnetic resonance imaging (MRI) allows qualitative assessment of LA\nand PPVs through visualization. However, there is a strong need for an advanced\nimage segmentation method to be applied to cardiac MRI for quantitative\nanalysis of LA and PPVs. In this study, we address this unmet clinical need by\nexploring a new deep learning-based segmentation strategy for quantification of\nLA and PPVs with high accuracy and heightened efficiency. Our approach is based\non a multi-view convolutional neural network (CNN) with an adaptive fusion\nstrategy and a new loss function that allows fast and more accurate convergence\nof the backpropagation based optimization. After training our network from\nscratch by using more than 60K 2D MRI images (slices), we have evaluated our\nsegmentation strategy to the STACOM 2013 cardiac segmentation challenge\nbenchmark. Qualitative and quantitative evaluations, obtained from the\nsegmentation challenge, indicate that the proposed method achieved the\nstate-of-the-art sensitivity (90%), specificity (99%), precision (94%), and\nefficiency levels (10 seconds in GPU, and 7.5 minutes in CPU).\n", "title": "CardiacNET: Segmentation of Left Atrium and Proximal Pulmonary Veins from MRI Using Multi-View CNN" }
null
null
[ "Computer Science", "Statistics" ]
null
true
null
2458
null
Validated
null
null
null
{ "abstract": " Data assimilation is widely used to improve flood forecasting capability,\nespecially through parameter inference requiring statistical information on the\nuncertain input parameters (upstream discharge, friction coefficient) as well\nas on the variability of the water level and its sensitivity with respect to\nthe inputs. For particle filter or ensemble Kalman filter, stochastically\nestimating probability density function and covariance matrices from a Monte\nCarlo random sampling requires a large ensemble of model evaluations, limiting\ntheir use in real-time application. To tackle this issue, fast surrogate models\nbased on Polynomial Chaos and Gaussian Process can be used to represent the\nspatially distributed water level in place of solving the shallow water\nequations. This study investigates the use of these surrogates to estimate\nprobability density functions and covariance matrices at a reduced\ncomputational cost and without the loss of accuracy, in the perspective of\nensemble-based data assimilation. This study focuses on 1-D steady state flow\nsimulated with MASCARET over the Garonne River (South-West France). Results\nshow that both surrogates feature similar performance to the Monte-Carlo random\nsampling, but for a much smaller computational budget; a few MASCARET\nsimulations (on the order of 10-100) are sufficient to accurately retrieve\ncovariance matrices and probability density functions all along the river, even\nwhere the flow dynamic is more complex due to heterogeneous bathymetry. This\npaves the way for the design of surrogate strategies suitable for representing\nunsteady open-channel flows in data assimilation.\n", "title": "Comparison of Polynomial Chaos and Gaussian Process surrogates for uncertainty quantification and correlation estimation of spatially distributed open-channel steady flows" }
null
null
null
null
true
null
2459
null
Default
null
null
null
{ "abstract": " The presence of ubiquitous magnetic fields in the universe is suggested from\nobservations of radiation and cosmic ray from galaxies or the intergalactic\nmedium (IGM). One possible origin of cosmic magnetic fields is the\nmagnetogenesis in the primordial universe. Such magnetic fields are called\nprimordial magnetic fields (PMFs), and are considered to affect the evolution\nof matter density fluctuations and the thermal history of the IGM gas. Hence\nthe information of PMFs is expected to be imprinted on the anisotropies of the\ncosmic microwave background (CMB) through the thermal Sunyaev-Zel'dovich (tSZ)\neffect in the IGM. In this study, given an initial power spectrum of PMFs as\n$P(k)\\propto B_{\\rm 1Mpc}^2 k^{n_{B}}$, we calculate dynamical and thermal\nevolutions of the IGM under the influence of PMFs, and compute the resultant\nangular power spectrum of the Compton $y$-parameter on the sky. As a result, we\nfind that two physical processes driven by PMFs dominantly determine the power\nspectrum of the Compton $y$-parameter; (i) the heating due to the ambipolar\ndiffusion effectively works to increase the temperature and the ionization\nfraction, and (ii) the Lorentz force drastically enhances the density contrast\njust after the recombination epoch. These facts result in making the tSZ\nangular power spectrum induced by the PMFs more remarkable at $\\ell >10^4$ than\nthat by galaxy clusters even with $B_{\\rm 1Mpc}=0.1$ nG and $n_{B}=-1.0$\nbecause the contribution from galaxy clusters decreases with increasing $\\ell$.\nThe measurement of the tSZ angular power spectrum on high $\\ell$ modes can\nprovide the stringent constraint on PMFs.\n", "title": "Thermal Sunyaev-Zel'dovich effect in the intergalactic medium with primordial magnetic fields" }
null
null
null
null
true
null
2460
null
Default
null
null
null
{ "abstract": " We study a model of two species of one-dimensional linearly dispersing\nfermions interacting via an s-wave Feshbach resonance at zero temperature.\nWhile this model is known to be integrable, it possesses novel features that\nhave not previously been investigated. Here, we present an exact solution based\non the coordinate Bethe Ansatz. In the limit of infinite resonance strength,\nwhich we term the strongly interacting limit, the two species of fermions\nbehave as free Fermi gases. In the limit of infinitely weak resonance, or the\nweakly interacting limit, the gases can be in different phases depending on the\ndetuning, the relative velocities of the particles, and the particle densities.\nWhen the molecule moves faster or slower than both species of atoms, the atomic\nvelocities get renormalized and the atoms may even become non-chiral. On the\nother hand, when the molecular velocity is between that of the atoms, the\nsystem may behave like a weakly interacting Lieb-Liniger gas.\n", "title": "One-dimensional model of chiral fermions with Feshbach resonant interactions" }
null
null
null
null
true
null
2461
null
Default
null
null
null
{ "abstract": " The positive semidefinite rank of a convex body $C$ is the size of its\nsmallest positive semidefinite formulation. We show that the positive\nsemidefinite rank of any convex body $C$ is at least $\\sqrt{\\log d}$ where $d$\nis the smallest degree of a polynomial that vanishes on the boundary of the\npolar of $C$. This improves on the existing bound which relies on results from\nquantifier elimination. The proof relies on the Bézout bound applied to the\nKarush-Kuhn-Tucker conditions of optimality. We discuss the connection with the\nalgebraic degree of semidefinite programming and show that the bound is tight\n(up to constant factor) for random spectrahedra of suitable dimension.\n", "title": "A lower bound on the positive semidefinite rank of convex bodies" }
null
null
null
null
true
null
2462
null
Default
null
null
null
{ "abstract": " We investigate the holonomy group of singular Kähler-Einstein metrics on\nklt varieties with numerically trivial canonical divisor. Finiteness of the\nnumber of connected components, a Bochner principle for holomorphic tensors,\nand a connection between irreducibility of holonomy representations and\nstability of the tangent sheaf are established. As a consequence, known\ndecompositions for tangent sheaves of varieties with trivial canonical divisor\nare refined. In particular, we show that up to finite quasi-étale covers,\nvarieties with strongly stable tangent sheaf are either Calabi-Yau or\nirreducible holomorphic symplectic. These results form one building block for\nHöring-Peternell's recent proof of a singular version of the\nBeauville-Bogomolov Decomposition Theorem.\n", "title": "Klt varieties with trivial canonical class - Holonomy, differential forms, and fundamental groups" }
null
null
null
null
true
null
2463
null
Default
null
null
null
{ "abstract": " Selective classification techniques (also known as reject option) have not\nyet been considered in the context of deep neural networks (DNNs). These\ntechniques can potentially significantly improve DNNs prediction performance by\ntrading-off coverage. In this paper we propose a method to construct a\nselective classifier given a trained neural network. Our method allows a user\nto set a desired risk level. At test time, the classifier rejects instances as\nneeded, to grant the desired risk (with high probability). Empirical results\nover CIFAR and ImageNet convincingly demonstrate the viability of our method,\nwhich opens up possibilities to operate DNNs in mission-critical applications.\nFor example, using our method an unprecedented 2% error in top-5 ImageNet\nclassification can be guaranteed with probability 99.9%, and almost 60% test\ncoverage.\n", "title": "Selective Classification for Deep Neural Networks" }
null
null
null
null
true
null
2464
null
Default
null
null
null
{ "abstract": " Cognitive computing systems require human labeled data for evaluation, and\noften for training. The standard practice used in gathering this data minimizes\ndisagreement between annotators, and we have found this results in data that\nfails to account for the ambiguity inherent in language. We have proposed the\nCrowdTruth method for collecting ground truth through crowdsourcing, that\nreconsiders the role of people in machine learning based on the observation\nthat disagreement between annotators provides a useful signal for phenomena\nsuch as ambiguity in the text. We report on using this method to build an\nannotated data set for medical relation extraction for the $cause$ and $treat$\nrelations, and how this data performed in a supervised training experiment. We\ndemonstrate that by modeling ambiguity, labeled data gathered from crowd\nworkers can (1) reach the level of quality of domain experts for this task\nwhile reducing the cost, and (2) provide better training data at scale than\ndistant supervision. We further propose and validate new weighted measures for\nprecision, recall, and F-measure, that account for ambiguity in both human and\nmachine performance on this task.\n", "title": "Crowdsourcing Ground Truth for Medical Relation Extraction" }
null
null
null
null
true
null
2465
null
Default
null
null
null
{ "abstract": " We demonstrate a random bit streaming system that uses a chaotic laser as its\nphysical entropy source. By performing real-time bit manipulation for bias\nreduction, we were able to provide the memory of a personal computer with a\nconstant supply of ready-to-use physical random bits at a throughput of up to 4\nGbps. We pay special attention to the end-to-end entropy source model\ndescribing how the entropy from physical sources is converted into bit entropy.\nWe confirmed the statistical quality of the generated random bits by revealing\nthe pass rate of the NIST SP800-22 test suite to be 65 % to 75 %, which is\ncommonly considered acceptable for a reliable random bit generator. We also\nconfirmed the stable operation of our random bit steaming system with long-term\nbias monitoring.\n", "title": "Chaotic laser based physical random bit streaming system with a computer application interface" }
null
null
null
null
true
null
2466
null
Default
null
null
null
{ "abstract": " We have investigated the electronic states and spin polarization of\nhalf-metallic ferromagnet CrO$_2$ (100) epitaxial films by bulk-sensitive\nspin-resolved photoemission spectroscopy with a focus on non-quasiparticle\n(NQP) states derived from electron-magnon interactions. We found that the\naveraged values of the spin polarization are approximately 100% and 40% at 40 K\nand 300 K, respectively. This is consistent with the previously reported result\n[H. Fujiwara et al., Appl. Phys. Lett. 106, 202404 (2015).]. At 100 K, peculiar\nspin depolarization was observed at the Fermi level ($E_{F}$), which is\nsupported by theoretical calculations predicting NQP states. This suggests the\npossible appearance of NQP states in CrO$_2$. We also compare the temperature\ndependence of our spin polarizations with that of the magnetization.\n", "title": "Observation of Intrinsic Half-metallic Behavior of CrO$_2$ (100) Epitaxial Films by Bulk-sensitive Spin-resolved PES" }
null
null
[ "Physics" ]
null
true
null
2467
null
Validated
null
null
null
{ "abstract": " A quality assurance and performance qualification laboratory was built at\nMcGill University for the Canadian-made small-strip Thin Gap Chamber (sTGC)\nmuon detectors produced for the 2019-2020 ATLAS experiment muon spectrometer\nupgrade. The facility uses cosmic rays as a muon source to ionise the quenching\ngas mixture of pentane and carbon dioxide flowing through the sTGC detector. A\ngas system was developed and characterised for this purpose, with a simple and\nefficient gas condenser design utilizing a Peltier thermoelectric cooler (TEC).\nThe gas system was tested to provide the desired 45 vol% pentane concentration.\nFor continuous operations, a state-machine system was implemented with alerting\nand remote monitoring features to run all cosmic-ray data-acquisition\nassociated slow-control systems, such as high/low voltage, gas system and\nenvironmental monitoring, in a safe and continuous mode, even in the absence of\nan operator.\n", "title": "Development and Characterisation of a Gas System and its Associated Slow-Control System for an ATLAS Small-Strip Thin Gap Chamber Testing Facility" }
null
null
null
null
true
null
2468
null
Default
null
null
null
{ "abstract": " We search for the signature of universal properties of extreme events,\ntheoretically predicted for Axiom A flows, in a chaotic and high dimensional\ndynamical system by studying the convergence of GEV (Generalized Extreme Value)\nand GP (Generalized Pareto) shape parameter estimates to a theoretical value,\nexpressed in terms of partial dimensions of the attractor, which are global\nproperties. We consider a two layer quasi-geostrophic (QG) atmospheric model\nusing two forcing levels, and analyse extremes of different types of physical\nobservables (local, zonally-averaged energy, and the average value of energy\nover the mid-latitudes). Regarding the predicted universality, we find closer\nagreement in the shape parameter estimates only in the case of strong forcing,\nproducing a highly chaotic behaviour, for some observables (the local energy at\nevery latitude). Due to the limited (though very large) data size and the\npresence of serial correlations, it is difficult to obtain robust statistics of\nextremes in case of the other observables. In the case of weak forcing,\ninducing a less pronounced chaotic flow with regime behaviour, we find worse\nagreement with the theory developed for Axiom A flows, which is unsurprising\nconsidering the properties of the system.\n", "title": "Convergence of extreme value statistics in a two-layer quasi-geostrophic atmospheric model" }
null
null
null
null
true
null
2469
null
Default
null
null
null
{ "abstract": " We present a search for metal absorption line systems at the highest\nredshifts to date using a deep (30h) VLT/X-Shooter spectrum of the z = 7.084\nquasi-stellar object (QSO) ULAS J1120+0641. We detect seven intervening systems\nat z > 5.5, with the highest-redshift system being a C IV absorber at z = 6.51.\nWe find tentative evidence that the mass density of C IV remains flat or\ndeclines with redshift at z < 6, while the number density of C II systems\nremains relatively flat over 5 < z < 7. These trends are broadly consistent\nwith models of chemical enrichment by star formation-driven winds that include\na softening of the ultraviolet background towards higher redshifts. We find a\nlarger number of weak ( W_rest < 0.3A ) Mg II systems over 5.9 < z < 7.0 than\npredicted by a power-law fit to the number density of stronger systems. This is\nconsistent with trends in the number density of weak Mg II systems at z = 2.5,\nand suggests that the mechanisms that create these absorbers are already in\nplace at z = 7. Finally, we investigate the associated narrow Si IV, C IV, and\nN V absorbers located near the QSO redshift, and find that at least one\ncomponent shows evidence of partial covering of the continuum source.\n", "title": "A deep search for metals near redshift 7: the line-of-sight towards ULAS J1120+0641" }
null
null
null
null
true
null
2470
null
Default
null
null
null
{ "abstract": " By year 2020, the number of smartphone users globally will reach 3 Billion\nand the mobile data traffic (cellular + WiFi) will exceed PC internet traffic\nthe first time. As the number of smartphone users and the amount of data\ntransferred per smartphone grow exponentially, limited battery power is\nbecoming an increasingly critical problem for mobile devices which increasingly\ndepend on network I/O. Despite the growing body of research in power management\ntechniques for the mobile devices at the hardware layer as well as the lower\nlayers of the networking stack, there has been little work focusing on saving\nenergy at the application layer for the mobile systems during network I/O. In\nthis paper, to the best of our knowledge, we are first to provide an in depth\nanalysis of the effects of application layer data transfer protocol parameters\non the energy consumption of mobile phones. We show that significant energy\nsavings can be achieved with application layer solutions at the mobile systems\nduring data transfer with no or minimal performance penalty. In many cases,\nperformance increase and energy savings can be achieved simultaneously.\n", "title": "Energy-Performance Trade-offs in Mobile Data Transfers" }
null
null
null
null
true
null
2471
null
Default
null
null
null
{ "abstract": " Motivated by the model- independent pricing of derivatives calibrated to the\nreal market, we consider an optimization problem similar to the optimal\nSkorokhod embedding problem, where the embedded Brownian motion needs only to\nreproduce a finite number of prices of Vanilla options. We derive in this paper\nthe corresponding dualities and the geometric characterization of optimizers.\nThen we show a stability result, i.e. when more and more Vanilla options are\ngiven, the optimization problem converges to an optimal Skorokhod embedding\nproblem, which constitutes the basis of the numerical computation in practice.\nIn addition, by means of different metrics on the space of probability\nmeasures, a convergence rate analysis is provided under suitable conditions.\n", "title": "A stability result on optimal Skorokhod embedding" }
null
null
null
null
true
null
2472
null
Default
null
null
null
{ "abstract": " This paper aims to provide a better understanding of a symmetric loss. First,\nwe show that using a symmetric loss is advantageous in the balanced error rate\n(BER) minimization and area under the receiver operating characteristic curve\n(AUC) maximization from corrupted labels. Second, we prove general theoretical\nproperties of symmetric losses, including a classification-calibration\ncondition, excess risk bound, conditional risk minimizer, and AUC-consistency\ncondition. Third, since all nonnegative symmetric losses are non-convex, we\npropose a convex barrier hinge loss that benefits significantly from the\nsymmetric condition, although it is not symmetric everywhere. Finally, we\nconduct experiments on BER and AUC optimization from corrupted labels to\nvalidate the relevance of the symmetric condition.\n", "title": "On Symmetric Losses for Learning from Corrupted Labels" }
null
null
null
null
true
null
2473
null
Default
null
null
null
{ "abstract": " The ease of integration coupled with large second-order nonlinear coefficient\nof atomically thin layered 2D materials presents a unique opportunity to\nrealize second-order nonlinearity in silicon compatible integrated photonic\nsystem. However, the phase matching requirement for second-order nonlinear\noptical processes makes the nanophotonic design difficult. We show that by\nnano-patterning the 2D material, quasi-phase matching can be achieved. Such\npatterning based phase-matching could potentially compensate for inevitable\nfabrication errors and significantly simplify the design process of the\nnonlinear nano-photonic devices.\n", "title": "Phase matched nonlinear optics via patterning layered materials" }
null
null
null
null
true
null
2474
null
Default
null
null
null
{ "abstract": " We prove that any cyclic quadrilateral can be inscribed in any closed convex\n$C^1$-curve. The smoothness condition is not required if the quadrilateral is a\nrectangle.\n", "title": "Any cyclic quadrilateral can be inscribed in any closed convex smooth curve" }
null
null
null
null
true
null
2475
null
Default
null
null
null
{ "abstract": " We prove that for a free noncyclic group $F$, $H_2(\\hat F_\\mathbb Q, \\mathbb\nQ)$ is an uncountable $\\mathbb Q$-vector space. Here $\\hat F_\\mathbb Q$ is the\n$\\mathbb Q$-completion of $F$. This answers a problem of A.K. Bousfield for the\ncase of rational coefficients. As a direct consequence of this result it\nfollows that, a wedge of circles is $\\mathbb Q$-bad in the sense of\nBousfield-Kan. The same methods as used in the proof of the above results allow\nto show that, the homology $H_2(\\hat F_\\mathbb Z,\\mathbb Z)$ is not divisible\ngroup, where $\\hat F_\\mathbb Z$ is the integral pronilpotent completion of $F$.\n", "title": "A finite Q-bad space" }
null
null
null
null
true
null
2476
null
Default
null
null
null
{ "abstract": " Intentional or unintentional contacts are bound to occur increasingly more\noften due to the deployment of autonomous systems in human environments. In\nthis paper, we devise methods to computationally predict imminent collisions\nbetween objects, robots and people, and use an upper-body humanoid robot to\nblock them if they are likely to happen. We employ statistical methods for\neffective collision prediction followed by sensor-based trajectory generation\nand real-time control to attempt to stop the likely collisions using the most\nfavorable part of the blocking robot. We thoroughly investigate collisions in\nvarious types of experimental setups involving objects, robots, and people.\nOverall, the main contribution of this paper is to devise sensor-based\nprediction, trajectory generation and control processes for highly articulated\nrobots to prevent collisions against people, and conduct numerous experiments\nto validate this approach.\n", "title": "On Blocking Collisions between People, Objects and other Robots" }
null
null
null
null
true
null
2477
null
Default
null
null
null
{ "abstract": " The number of published findings in biomedicine increases continually. At the\nsame time, specifics of the domain's terminology complicates the task of\nrelevant publications retrieval. In the current research, we investigate\ninfluence of terms' variability and ambiguity on a paper's likelihood of being\nretrieved. We obtained statistics that demonstrate significance of the issue\nand its challenges, followed by presenting the sci.AI platform, which allows\nprecise terms labeling as a resolution.\n", "title": "Increasing Papers' Discoverability with Precise Semantic Labeling: the sci.AI Platform" }
null
null
null
null
true
null
2478
null
Default
null
null
null
{ "abstract": " We consider a classical problem of control of an inverted pendulum by means\nof a horizontal motion of its pivot point. We suppose that the control law can\nbe non-autonomous and non-periodic w.r.t. the position of the pendulum. It is\nshown that global stabilization of the vertical upward position of the pendulum\ncannot be obtained for any Lipschitz control law, provided some natural\nassumptions. Moreover, we show that there always exists a solution separated\nfrom the vertical position and along which the pendulum never becomes\nhorizontal. Hence, we also prove that global stabilization cannot be obtained\nin the system where the pendulum can impact the horizontal plane (for any\nmechanical model of impact). Similar results are presented for several\nanalogous systems: a pendulum on a cart, a spherical pendulum, and a pendulum\nwith an additional torque control.\n", "title": "On topological obstructions to global stabilization of an inverted pendulum" }
null
null
null
null
true
null
2479
null
Default
null
null
null
{ "abstract": " Refactoring is a maintenance activity that aims to improve design quality\nwhile preserving the behavior of a system. Several (semi)automated approaches\nhave been proposed to support developers in this maintenance activity, based on\nthe correction of anti-patterns, which are \"poor solutions\" to recurring design\nproblems. However, little quantitative evidence exists about the impact of\nautomatically refactored code on program comprehension, and in which context\nautomated refactoring can be as effective as manual refactoring. We performed\nan empirical study to investigate whether the use of automated refactoring\napproaches affects the understandability of systems during comprehension tasks.\n(1) We surveyed 80 developers, asking them to identify from a set of 20\nrefactoring changes if they were generated by developers or by machine, and to\nrate the refactorings according to their design quality; (2) we asked 30\ndevelopers to complete code comprehension tasks on 10 systems that were\nrefactored by either a freelancer or an automated refactoring tool. We measured\ndevelopers' performance using the NASA task load index for their effort; the\ntime that they spent performing the tasks; and their percentages of correct\nanswers. Results show that for 3 out the 5 types of studied anti-patterns,\ndevelopers cannot recognize the origin of the refactoring (i.e., whether it was\nperformed by a human or an automatic tool). We also observe that developers do\nnot prefer human refactorings over automated refactorings, except when\nrefactoring Blob classes; and that there is no statistically significant\ndifference between the impact on code understandability of human refactorings\nand automated refactorings. We conclude that automated refactorings can be as\neffective as manual refactorings. 
However, for complex anti-patterns types like\nthe Blob, the perceived quality of human refactorings is slightly higher.\n", "title": "Automated Refactoring: Can They Pass The Turing Test?" }
null
null
[ "Computer Science" ]
null
true
null
2480
null
Validated
null
null
null
{ "abstract": " This is the second in a series of papers where we construct an invariant of a\nfour-dimensional piecewise linear manifold $M$ with a given middle cohomology\nclass $h\\in H^2(M,\\mathbb C)$. This invariant is the square root of the torsion\nof unusual chain complex introduced in Part I (arXiv:1605.06498) of our work,\nmultiplied by a correcting factor. Here we find this factor by studying the\nbehavior of our construction under all four-dimensional Pachner moves, and show\nthat it can be represented in a multiplicative form: a product of same-type\nmultipliers over all 2-faces, multiplied by a product of same-type multipliers\nover all pentachora.\n", "title": "Free fermions on a piecewise linear four-manifold. II: Pachner moves" }
null
null
null
null
true
null
2481
null
Default
null
null
null
{ "abstract": " Bayesian inference via standard Markov Chain Monte Carlo (MCMC) methods such\nas Metropolis-Hastings is too computationally intensive to handle large\ndatasets, since the cost per step usually scales like $O(n)$ in the number of\ndata points $n$. We propose the Scalable Metropolis-Hastings (SMH) kernel that\nexploits Gaussian concentration of the posterior to require processing on\naverage only $O(1)$ or even $O(1/\\sqrt{n})$ data points per step. This scheme\nis based on a combination of factorized acceptance probabilities, procedures\nfor fast simulation of Bernoulli processes, and control variate ideas. Contrary\nto many MCMC subsampling schemes such as fixed step-size Stochastic Gradient\nLangevin Dynamics, our approach is exact insofar as the invariant distribution\nis the true posterior and not an approximation to it. We characterise the\nperformance of our algorithm theoretically, and give realistic and verifiable\nconditions under which it is geometrically ergodic. This theory is borne out by\nempirical results that demonstrate overall performance benefits over standard\nMetropolis-Hastings and various subsampling algorithms.\n", "title": "Scalable Metropolis-Hastings for Exact Bayesian Inference with Large Datasets" }
null
null
null
null
true
null
2482
null
Default
null
null
null
{ "abstract": " The kernel trick concept, formulated as an inner product in a feature space,\nfacilitates powerful extensions to many well-known algorithms. While the kernel\nmatrix involves inner products in the feature space, the sample covariance\nmatrix of the data requires outer products. Therefore, their spectral\nproperties are tightly connected. This allows us to examine the kernel matrix\nthrough the sample covariance matrix in the feature space and vice versa. The\nuse of kernels often involves a large number of features, compared to the\nnumber of observations. In this scenario, the sample covariance matrix is not\nwell-conditioned nor is it necessarily invertible, mandating a solution to the\nproblem of estimating high-dimensional covariance matrices under small sample\nsize conditions. We tackle this problem through the use of a shrinkage\nestimator that offers a compromise between the sample covariance matrix and a\nwell-conditioned matrix (also known as the \"target\") with the aim of minimizing\nthe mean-squared error (MSE). We propose a distribution-free kernel matrix\nregularization approach that is tuned directly from the kernel matrix, avoiding\nthe need to address the feature space explicitly. Numerical simulations\ndemonstrate that the proposed regularization is effective in classification\ntasks.\n", "title": "Regularization of the Kernel Matrix via Covariance Matrix Shrinkage Estimation" }
null
null
[ "Statistics" ]
null
true
null
2483
null
Validated
null
null
null
{ "abstract": " The seminal paper of Caponnetto and de Vito (2007) provides minimax-optimal\nrates for kernel ridge regression in a very general setting. Its proof,\nhowever, contains an error in its bound on the effective dimensionality. In\nthis note, we explain the mistake, provide a correct bound, and show that the\nmain theorem remains true.\n", "title": "Fixing an error in Caponnetto and de Vito (2007)" }
null
null
null
null
true
null
2484
null
Default
null
null
null
{ "abstract": " We propose a supervised algorithm for generating type embeddings in the same\nsemantic vector space as a given set of entity embeddings. The algorithm is\nagnostic to the derivation of the underlying entity embeddings. It does not\nrequire any manual feature engineering, generalizes well to hundreds of types\nand achieves near-linear scaling on Big Graphs containing many millions of\ntriples and instances by virtue of an incremental execution. We demonstrate the\nutility of the embeddings on a type recommendation task, outperforming a\nnon-parametric feature-agnostic baseline while achieving 15x speedup and\nnear-constant memory usage on a full partition of DBpedia. Using\nstate-of-the-art visualization, we illustrate the agreement of our\nextensionally derived DBpedia type embeddings with the manually curated domain\nontology. Finally, we use the embeddings to probabilistically cluster about 4\nmillion DBpedia instances into 415 types in the DBpedia ontology.\n", "title": "Supervised Typing of Big Graphs using Semantic Embeddings" }
null
null
[ "Computer Science" ]
null
true
null
2485
null
Validated
null
null
null
{ "abstract": " Network embedding aims to find a way to encode a network by learning an\nembedding vector for each node in the network. The network often has property\ninformation which is highly informative with respect to the node's position and\nrole in the network. Most network embedding methods fail to utilize this\ninformation during network representation learning. In this paper, we propose a\nnovel framework, FANE, to integrate structure and property information in the\nnetwork embedding process. In FANE, we design a network to unify heterogeneity\nof the two information sources, and define a new random walking strategy to\nleverage property information and make the two sources of information\ncomplement each other. FANE is\nconceptually simple and empirically powerful. It improves over the\nstate-of-the-art methods on the Cora dataset classification task by over 5%, and by more\nthan 10% on the WebKB dataset classification task. Experiments also show that the\nresults improve over the state-of-the-art methods by a larger margin as the training\nsize increases. Moreover, qualitative visualization shows that our framework is helpful in\nnetwork property information exploration. In all, we present a new way for\nefficiently learning state-of-the-art task-independent representations in\ncomplex attributed networks. The source code and datasets of this paper can be\nobtained from this https URL.\n", "title": "Flexible Attributed Network Embedding" }
null
null
null
null
true
null
2486
null
Default
null
null
null
{ "abstract": " Automated program repair (APR) has attracted widespread attention in recent\nyears with substantial techniques being proposed. Meanwhile, a number of\nbenchmarks have been established for evaluating the performances of APR\ntechniques, among which Defects4J is one of the most widely used benchmarks.\nHowever, bugs in Mockito, a project augmented in a later version of Defects4J,\ndo not receive much attention in recent research. In this paper, we aim at\ninvestigating the necessity of considering Mockito bugs when evaluating APR\ntechniques. Our findings show that: 1) Mockito bugs are not more complex for\nrepairing compared with bugs from other projects; 2) the bugs repaired by the\nstate-of-the-art tools share the same repair patterns compared with those\npatterns required to repair Mockito bugs; however, 3) the state-of-the-art\ntools perform poorly on Mockito bugs (Nopol can only correctly fix one bug\nwhile SimFix and CapGen cannot fix any bug in Mockito even if all the buggy\nlocations have been exposed). We conclude from these results that existing APR\ntechniques may be overfitting to their evaluated subjects and we should\nconsider Mockito, or even more bugs from other projects, when evaluating newly\nproposed APR techniques. We further find out a unique repair action required to\nrepair Mockito bugs named external package addition. Importing the external\npackages from the test code associated with the source code is feasible for\nenlarging the search space and this action can be augmented with existing\nrepair actions to advance existing techniques.\n", "title": "Attention Please: Consider Mockito when Evaluating Newly Proposed Automated Program Repair Techniques" }
null
null
null
null
true
null
2487
null
Default
null
null
null
{ "abstract": " This study tries to explain the connection between communication modalities\nand levels of supervision in teleoperation during a dexterous task, like\nsurgery. This concept is applied to two surgical related tasks: incision and\npeg transfer. It was found that as the complexity of the task escalates, the\ncombination linking human supervision with a more expressive modality shows\nbetter performance than other combinations of modalities and control. More\nspecifically, in the peg transfer task, the combination of speech modality and\naction level supervision achieves shorter task completion time (77.1 +- 3.4 s)\nwith fewer mistakes (0.20 +- 0.17 pegs dropped).\n", "title": "Communication Modalities for Supervised Teleoperation in Highly Dexterous Tasks - Does one size fit all?" }
null
null
null
null
true
null
2488
null
Default
null
null
null
{ "abstract": " We introduce a model of anonymous games with the player dependent action\nsets. We propose several learning procedures based on the well-known Fictitious\nPlay and Online Mirror Descent and prove their convergence to equilibrium under\nthe classical monotonicity condition. Typical examples are first-order mean\nfield games.\n", "title": "Learning in anonymous nonatomic games with applications to first-order mean field games" }
null
null
null
null
true
null
2489
null
Default
null
null
null
{ "abstract": " In this work, we study the problem of minimizing the sum of strongly convex\nfunctions split over a network of $n$ nodes. We propose the decentralized and\nasynchronous algorithm ADFS to tackle the case when local functions are\nthemselves finite sums with $m$ components. ADFS converges linearly when local\nfunctions are smooth, and matches the rates of the best known finite sum\nalgorithms when executed on a single machine. On several machines, ADFS enjoys\na $O (\\sqrt{n})$ or $O(n)$ speed-up depending on the leading complexity term as\nlong as the diameter of the network is not too big with respect to $m$. This\nalso leads to a $\\sqrt{m}$ speed-up over state-of-the-art distributed batch\nmethods, which is the expected speed-up for finite sum algorithms. In terms of\ncommunication times and network parameters, ADFS scales as well as optimal\ndistributed batch algorithms. As a side contribution, we give a generalized\nversion of the accelerated proximal coordinate gradient algorithm using\narbitrary sampling that we apply to a well-chosen dual problem to derive ADFS.\nYet, ADFS uses primal proximal updates that only require solving\none-dimensional problems for many standard machine learning applications.\nFinally, ADFS can be formulated for non-smooth objectives with equally good\nscaling properties. We illustrate the improvement of ADFS over state-of-the-art\napproaches with simulations.\n", "title": "Asynchronous Accelerated Proximal Stochastic Gradient for Strongly Convex Distributed Finite Sums" }
null
null
null
null
true
null
2490
null
Default
null
null
null
{ "abstract": " We show that after forming a connected sum with a homotopy sphere, all\n(2j-1)-connected 2j-parallelisable manifolds in dimension 4j+1, j > 0, can be\nequipped with Riemannian metrics of 2-positive Ricci curvature. When j=1 we\nextend the above to certain classes of simply-connected non-spin 5-manifolds.\nThe condition of 2-positive Ricci curvature is defined to mean that the sum of\nthe two smallest eigenvalues of the Ricci tensor is positive at every point.\nThis result is a counterpart to a previous result of the authors concerning the\nexistence of positive Ricci curvature on highly connected manifolds in\ndimensions 4j-1 for j > 1, and in dimensions 4j+1 for j > 0 with torsion-free\ncohomology.\n", "title": "Intermediate curvatures and highly connected manifolds" }
null
null
null
null
true
null
2491
null
Default
null
null
null
{ "abstract": " We introduce a technique that can automatically tune the parameters of a\nrule-based computer vision system comprised of thresholds, combinational logic,\nand time constants. This lets us retain the flexibility and perspicacity of a\nconventionally structured system while allowing us to perform approximate\ngradient descent using labeled data. While this is only a heuristic procedure,\nas far as we are aware there is no other efficient technique for tuning such\nsystems. We describe the components of the system and the associated supervised\nlearning mechanism. We also demonstrate the utility of the algorithm by\ncomparing its performance versus hand tuning for an automotive headlight\ncontroller. Despite having over 100 parameters, the method is able to\nprofitably adjust the system values given just the desired output for a number\nof videos.\n", "title": "Structured Differential Learning for Automatic Threshold Setting" }
null
null
null
null
true
null
2492
null
Default
null
null
null
{ "abstract": " In this paper, we introduce two new non-singular kernel fractional\nderivatives and present a class of other fractional derivatives derived from\nthe new formulations. We present some important results of uniformly convergent\nsequences of continuous functions, in particular the Comparison's principle,\nand others that allow, the study of the limitation of fractional nonlinear\ndifferential equations.\n", "title": "A new fractional derivative of variable order with non-singular kernel and fractional differential equations" }
null
null
null
null
true
null
2493
null
Default
null
null
null
{ "abstract": " In several domains obtaining class annotations is expensive while at the same\ntime unlabelled data are abundant. While most semi-supervised approaches\nenforce restrictive assumptions on the data distribution, recent work has\nmanaged to learn semi-supervised models in a non-restrictive regime. However,\nso far such approaches have only been proposed for linear models. In this work,\nwe introduce semi-supervised parameter learning for Sum-Product Networks\n(SPNs). SPNs are deep probabilistic models admitting inference in linear time\nin number of network edges. Our approach has several advantages, as it (1)\nallows generative and discriminative semi-supervised learning, (2) guarantees\nthat adding unlabelled data can increase, but not degrade, the performance\n(safe), and (3) is computationally efficient and does not enforce restrictive\nassumptions on the data distribution. We show on a variety of data sets that\nsafe semi-supervised learning with SPNs is competitive compared to\nstate-of-the-art and can lead to a better generative and discriminative\nobjective value than a purely supervised approach.\n", "title": "Safe Semi-Supervised Learning of Sum-Product Networks" }
null
null
null
null
true
null
2494
null
Default
null
null
null
{ "abstract": " The number of studies for the analysis of remote sensing images has been\ngrowing exponentially in the last decades. Many studies, however, only report\nresults---in the form of certain performance metrics---by a few selected\nalgorithms on a training and testing sample. While this often yields valuable\ninsights, it tells little about some important aspects. For example, one might\nbe interested in understanding the nature of a study by the interaction of\nalgorithm, features, and the sample as these collectively contribute to the\noutcome; among these three, which would be a more productive direction in\nimproving a study; how to assess the sample quality or the value of a set of\nfeatures etc. With a focus on land-use classification, we advocate the use of a\nstructured analysis. The output of a study is viewed as the result of the\ninterplay among three input dimensions: feature, sample, and algorithm.\nSimilarly, another dimension, the error, can be decomposed into error along\neach input dimension. Such a structural decomposition of the inputs or error\ncould help better understand the nature of the problem and potentially suggest\ndirections for improvement. We use the analysis of a remote sensing image at a\nstudy site in Guangzhou, China, to demonstrate how such a structured analysis\ncould be carried out and what insights it generates. The structured analysis\ncould be applied to a new study, or as a diagnosis to an existing one. We\nexpect this will inform practice in the analysis of remote sensing images, and\nhelp advance the state-of-the-art of land-use classification.\n", "title": "A Structured Approach to the Analysis of Remote Sensing Images" }
null
null
[ "Statistics" ]
null
true
null
2495
null
Validated
null
null
null
{ "abstract": " The multi-armed restless bandit problem is studied in the case where the\npay-off distributions are stationary $\\varphi$-mixing. This version of the\nproblem provides a more realistic model for most real-world applications, but\ncannot be optimally solved in practice, since it is known to be PSPACE-hard.\nThe objective of this paper is to characterize a sub-class of the problem where\n{\\em good} approximate solutions can be found using tractable approaches.\nSpecifically, it is shown that under some conditions on the $\\varphi$-mixing\ncoefficients, a modified version of UCB can prove effective. The main challenge\nis that, unlike in the i.i.d. setting, the distributions of the sampled\npay-offs may not have the same characteristics as those of the original bandit\narms. In particular, the $\\varphi$-mixing property does not necessarily carry\nover. This is overcome by carefully controlling the effect of a sampling policy\non the pay-off distributions. Some of the proof techniques developed in this\npaper can be more generally used in the context of online sampling under\ndependence. Proposed algorithms are accompanied with corresponding regret\nanalysis.\n", "title": "Approximations of the Restless Bandit Problem" }
null
null
null
null
true
null
2496
null
Default
null
null
null
{ "abstract": " Iteratively reweighted $\\ell_1$ algorithm is a popular algorithm for solving\na large class of optimization problems whose objective is the sum of a\nLipschitz differentiable loss function and a possibly nonconvex sparsity\ninducing regularizer. In this paper, motivated by the success of extrapolation\ntechniques in accelerating first-order methods, we study how widely used\nextrapolation techniques such as those in [4,5,22,28] can be incorporated to\npossibly accelerate the iteratively reweighted $\\ell_1$ algorithm. We consider\nthree versions of such algorithms. For each version, we exhibit an explicitly\ncheckable condition on the extrapolation parameters so that the sequence\ngenerated provably clusters at a stationary point of the optimization problem.\nWe also investigate global convergence under additional Kurdyka-$\\L$ojasiewicz\nassumptions on certain potential functions. Our numerical experiments show that\nour algorithms usually outperform the general iterative shrinkage and\nthresholding algorithm in [21] and an adaptation of the iteratively reweighted\n$\\ell_1$ algorithm in [23, Algorithm 7] with nonmonotone line-search for\nsolving random instances of log penalty regularized least squares problems in\nterms of both CPU time and solution quality.\n", "title": "Iteratively reweighted $\\ell_1$ algorithms with extrapolation" }
null
null
[ "Statistics" ]
null
true
null
2497
null
Validated
null
null
null
{ "abstract": " In object oriented software development, the analysis modeling is concerned\nwith the task of identifying problem level objects along with the relationships\nbetween them from software requirements. The software requirements are usually\nwritten in some natural language, and the analysis modeling is normally\nperformed by experienced human analysts. The huge gap between the software\nrequirements which are unstructured texts and analysis models which are usually\nstructured UML diagrams, along with human slip-ups, inevitably makes the\ntransformation process error prone. The automation of this process can help in\nreducing the errors in the transformation. In this paper we propose a tool\nsupported approach for automated transformation of use case specifications\ndocumented in English language into analysis class diagrams. The approach works\nin four steps. It first takes the textual specification of a use case as input,\nand then using a natural language parser generates type dependencies and parts\nof speech tags for each sentence in the specification. Then, it identifies the\nsentence structure of each sentence using a set of comprehensive sentence\nstructure rules. Next, it applies a set of transformation rules on the type\ndependencies and parts of speech tags of the sentences to discover the problem\nlevel objects and the relationships between them. Finally, it generates and\nvisualizes the analysis class diagram. We conducted a controlled experiment to\ncompare the correctness, completeness and redundancy of the analysis class\ndiagrams generated by our approach with those generated by the existing\nautomated approaches. The results showed that the analysis class diagrams\ngenerated by our approach were more correct, more complete, and less redundant\nthan those generated by the other approaches.\n", "title": "Automatic generation of analysis class diagrams from use case specifications" }
null
null
[ "Computer Science" ]
null
true
null
2498
null
Validated
null
null
null
{ "abstract": " This Perspective provides examples of current and future applications of deep\nlearning in pharmacogenomics, including: (1) identification of novel regulatory\nvariants located in noncoding domains and their function as applied to\npharmacoepigenomics; (2) patient stratification from medical records; and (3)\nprediction of drugs, targets, and their interactions. Deep learning\nencapsulates a family of machine learning algorithms that over the last decade\nhas transformed many important subfields of artificial intelligence (AI) and\nhas demonstrated breakthrough performance improvements on a wide range of tasks\nin biomedicine. We anticipate that in the future deep learning will be widely\nused to predict personalized drug response and optimize medication selection\nand dosing, using knowledge extracted from large and complex molecular,\nepidemiological, clinical, and demographic datasets.\n", "title": "Deep Learning in Pharmacogenomics: From Gene Regulation to Patient Stratification" }
null
null
null
null
true
null
2499
null
Default
null
null
null
{ "abstract": " An explicit description of the virtualization map for the (modified) Nakajima\nmonomial model for crystals is given. We give an explicit description of the\nLusztig data for modified Nakajima monomials in type $A_n$.\n", "title": "Virtual Crystals and Nakajima Monomials" }
null
null
null
null
true
null
2500
null
Default
null
null