Dataset fields and inferred types (one column per field):
text: null
inputs: dict
prediction: null
prediction_agent: null
annotation: list
annotation_agent: null
multi_label: bool (1 distinct value)
explanation: null
id: string (length 1 to 5 characters)
metadata: null
status: string (2 distinct values)
event_timestamp: null
metrics: null
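The rows below repeat these thirteen fields in the order given above, one value per cell. As a minimal sketch of how a flattened row maps back onto the schema, the following Python snippet reassembles the first row (id 13801) into a single record. The helper name `rebuild_record` is ours, the abstract is abbreviated to keep the example short, and the reading of "Default" versus "Validated" as unannotated versus annotated status is an assumption, not something stated in the dump.

```python
import json

# Field order exactly as listed in the schema above.
FIELDS = [
    "text", "inputs", "prediction", "prediction_agent", "annotation",
    "annotation_agent", "multi_label", "explanation", "id", "metadata",
    "status", "event_timestamp", "metrics",
]

def rebuild_record(cells):
    """Zip one flattened 13-cell row back into a field-name -> value dict."""
    if len(cells) != len(FIELDS):
        raise ValueError(f"expected {len(FIELDS)} cells, got {len(cells)}")
    return dict(zip(FIELDS, cells))

# First row of this dump, reassembled (abstract shortened for the example).
record = rebuild_record([
    None,                                   # text
    {"abstract": "Deep learning has started to revolutionize several different industries...",
     "title": "Accurate Real Time Localization Tracking in A Clinical Environment "
              "using Bluetooth Low Energy and Deep Learning"},
    None, None, None, None,                 # prediction .. annotation_agent
    True,                                   # multi_label
    None,                                   # explanation
    "13801",                                # id (schema: string, length 1 to 5)
    None,                                   # metadata
    "Default",                              # status; "Default" vs "Validated" assumed to mean unannotated vs annotated
    None, None,                             # event_timestamp, metrics
])

print(json.dumps(record, indent=2))
```

Every row that follows reads the same way; in this excerpt only inputs, annotation, multi_label, id, and status ever carry non-null values.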

text: null
inputs:
{ "abstract": " Deep learning has started to revolutionize several different industries, and\nthe applications of these methods in medicine are now becoming more\ncommonplace. This study focuses on investigating the feasibility of tracking\npatients and clinical staff wearing Bluetooth Low Energy (BLE) tags in a\nradiation oncology clinic using artificial neural networks (ANNs) and\nconvolutional neural networks (CNNs). The performance of these networks was\ncompared to relative received signal strength indicator (RSSI) thresholding and\ntriangulation. By utilizing temporal information, a combined CNN+ANN network\nwas capable of correctly identifying the location of the BLE tag with an\naccuracy of 99.9%. It outperformed a CNN model (accuracy = 94%), a thresholding\nmodel employing majority voting (accuracy = 95%), and a triangulation\nclassifier utilizing majority voting (accuracy = 95%). Future studies will seek\nto deploy this affordable real time location system in hospitals to improve\nclinical workflow, efficiency, and patient safety.\n", "title": "Accurate Real Time Localization Tracking in A Clinical Environment using Bluetooth Low Energy and Deep Learning" }
prediction: null, prediction_agent: null, annotation: null, annotation_agent: null
multi_label: true, explanation: null, id: 13801, metadata: null
status: Default, event_timestamp: null, metrics: null

text: null
inputs:
{ "abstract": " Monte Carlo (MC) sampling methods are widely applied in Bayesian inference,\nsystem simulation and optimization problems. The Markov Chain Monte Carlo\n(MCMC) algorithms are a well-known class of MC methods which generate a Markov\nchain with the desired invariant distribution. In this document, we focus on\nthe Metropolis-Hastings (MH) sampler, which can be considered as the atom of\nthe MCMC techniques, introducing the basic notions and different properties. We\ndescribe in details all the elements involved in the MH algorithm and the most\nrelevant variants. Several improvements and recent extensions proposed in the\nliterature are also briefly discussed, providing a quick but exhaustive\noverview of the current Metropolis-based sampling's world.\n", "title": "Metropolis Sampling" }
prediction: null, prediction_agent: null, annotation: [ "Statistics" ], annotation_agent: null
multi_label: true, explanation: null, id: 13802, metadata: null
status: Validated, event_timestamp: null, metrics: null

text: null
inputs:
{ "abstract": " In this paper we describe a novel local algorithm for large statistical\nswarms using \"harmonic attractor dynamics\", by means of which a swarm can\nconstruct harmonics of the environment. This in turn allows the swarm to\napproximately reconstruct desired structures in the environment. The robots\nnavigate in a discrete environment, completely free of localization, being able\nto communicate with other robots in its own discrete cell only, and being able\nto sense or take reliable action within a disk of radius $r$ around itself. We\npresent the mathematics that underlie such dynamics and present initial results\ndemonstrating the proposed algorithm.\n", "title": "Approximate Structure Construction Using Large Statistical Swarms" }
prediction: null, prediction_agent: null, annotation: null, annotation_agent: null
multi_label: true, explanation: null, id: 13803, metadata: null
status: Default, event_timestamp: null, metrics: null

text: null
inputs:
{ "abstract": " This book chapter introduces to the problem to which extent search strategies\nof foraging biological organisms can be identified by statistical data analysis\nand mathematical modeling. A famous paradigm in this field is the Levy Flight\nHypothesis: It states that under certain mathematical conditions Levy flights,\nwhich are a key concept in the theory of anomalous stochastic processes,\nprovide an optimal search strategy. This hypothesis may be understood\nbiologically as the claim that Levy flights represent an evolutionary adaptive\noptimal search strategy for foraging organisms. Another interpretation,\nhowever, is that Levy flights emerge from the interaction between a forager and\na given (scale-free) distribution of food sources. These hypotheses are\ndiscussed controversially in the current literature. We give examples and\ncounterexamples of experimental data and their analyses supporting and\nchallenging them.\n", "title": "Search for Food of Birds, Fish and Insects" }
prediction: null, prediction_agent: null, annotation: null, annotation_agent: null
multi_label: true, explanation: null, id: 13804, metadata: null
status: Default, event_timestamp: null, metrics: null

text: null
inputs:
{ "abstract": " Obtaining detailed and reliable data about local economic livelihoods in\ndeveloping countries is expensive, and data are consequently scarce. Previous\nwork has shown that it is possible to measure local-level economic livelihoods\nusing high-resolution satellite imagery. However, such imagery is relatively\nexpensive to acquire, often not updated frequently, and is mainly available for\nrecent years. We train CNN models on free and publicly available multispectral\ndaytime satellite images of the African continent from the Landsat 7 satellite,\nwhich has collected imagery with global coverage for almost two decades. We\nshow that despite these images' lower resolution, we can achieve accuracies\nthat exceed previous benchmarks.\n", "title": "Poverty Prediction with Public Landsat 7 Satellite Imagery and Machine Learning" }
prediction: null, prediction_agent: null, annotation: null, annotation_agent: null
multi_label: true, explanation: null, id: 13805, metadata: null
status: Default, event_timestamp: null, metrics: null

text: null
inputs:
{ "abstract": " We study implicit regularization when optimizing an underdetermined quadratic\nobjective over a matrix $X$ with gradient descent on a factorization of $X$. We\nconjecture and provide empirical and theoretical evidence that with small\nenough step sizes and initialization close enough to the origin, gradient\ndescent on a full dimensional factorization converges to the minimum nuclear\nnorm solution.\n", "title": "Implicit Regularization in Matrix Factorization" }
prediction: null, prediction_agent: null, annotation: null, annotation_agent: null
multi_label: true, explanation: null, id: 13806, metadata: null
status: Default, event_timestamp: null, metrics: null

text: null
inputs:
{ "abstract": " We study the Pareto frontier for two competing norms $\\|\\cdot\\|_X$ and\n$\\|\\cdot\\|_Y$ on a vector space. For a given vector $c$, the pareto frontier\ndescribes the possible values of $(\\|a\\|_X,\\|b\\|_Y)$ for a decomposition\n$c=a+b$. The singular value decomposition of a matrix is closely related to the\nPareto frontier for the spectral and nuclear norm. We will develop a general\ntheory that extends the notion of singular values of a matrix to arbitrary\nfinite dimensional euclidean vector spaces equipped with dual norms. This also\ngeneralizes the diagonal singular value decompositions for tensors introduced\nby the author in previous work. We can apply the results to denoising, where\n$c$ is a noisy signal, $a$ is a sparse signal and $b$ is noise. Applications\ninclude 1D total variation denoising, 2D total variation Rudin-Osher-Fatemi\nimage denoising, LASSO, basis pursuit denoising and tensor decompositions.\n", "title": "A general theory of singular values with applications to signal denoising" }
prediction: null, prediction_agent: null, annotation: null, annotation_agent: null
multi_label: true, explanation: null, id: 13807, metadata: null
status: Default, event_timestamp: null, metrics: null

text: null
inputs:
{ "abstract": " Deep Neural Networks are increasingly being used in a variety of machine\nlearning applications applied to user data on the cloud. However, this approach\nintroduces a number of privacy and efficiency challenges, as the cloud operator\ncan perform secondary inferences on the available data. Recently, advances in\nedge processing have paved the way for more efficient, and private, data\nprocessing at the source for simple tasks and lighter models, though they\nremain a challenge for larger, and more complicated models. In this paper, we\npresent a hybrid approach for breaking down large, complex deep models for\ncooperative, privacy-preserving analytics. We do this by breaking down the\npopular deep architectures and fine-tune them in a suitable way. We then\nevaluate the privacy benefits of this approach based on the information exposed\nto the cloud service. We also assess the local inference cost of different\nlayers on a modern handset for mobile applications. Our evaluations show that\nby using certain kind of fine-tuning and embedding techniques and at a small\nprocessing cost, we can greatly reduce the level of information available to\nunintended tasks applied to the data features on the cloud, and hence achieving\nthe desired tradeoff between privacy and performance.\n", "title": "A Hybrid Deep Learning Architecture for Privacy-Preserving Mobile Analytics" }
prediction: null, prediction_agent: null, annotation: null, annotation_agent: null
multi_label: true, explanation: null, id: 13808, metadata: null
status: Default, event_timestamp: null, metrics: null

text: null
inputs:
{ "abstract": " The CERN IT provides a set of Hadoop clusters featuring more than 5 PBytes of\nraw storage with different open-source, user-level tools available for\nanalytical purposes. The CMS experiment started collecting a large set of\ncomputing meta-data, e.g. dataset, file access logs, since 2015. These records\nrepresent a valuable, yet scarcely investigated, set of information that needs\nto be cleaned, categorized and analyzed. CMS can use this information to\ndiscover useful patterns and enhance the overall efficiency of the distributed\ndata, improve CPU and site utilization as well as tasks completion time. Here\nwe present evaluation of Apache Spark platform for CMS needs. We discuss two\nmain use-cases CMS analytics and ML studies where efficient process billions of\nrecords stored on HDFS plays an important role. We demonstrate that both Scala\nand Python (PySpark) APIs can be successfully used to execute extremely I/O\nintensive queries and provide valuable data insight from collected meta-data.\n", "title": "Exploiting Apache Spark platform for CMS computing analytics" }
prediction: null, prediction_agent: null, annotation: [ "Physics" ], annotation_agent: null
multi_label: true, explanation: null, id: 13809, metadata: null
status: Validated, event_timestamp: null, metrics: null

text: null
inputs:
{ "abstract": " Evolution sculpts both the body plans and nervous systems of agents together\nover time. In contrast, in AI and robotics, a robot's body plan is usually\ndesigned by hand, and control policies are then optimized for that fixed\ndesign. The task of simultaneously co-optimizing the morphology and controller\nof an embodied robot has remained a challenge. In psychology, the theory of\nembodied cognition posits that behavior arises from a close coupling between\nbody plan and sensorimotor control, which suggests why co-optimizing these two\nsubsystems is so difficult: most evolutionary changes to morphology tend to\nadversely impact sensorimotor control, leading to an overall decrease in\nbehavioral performance. Here, we further examine this hypothesis and\ndemonstrate a technique for \"morphological innovation protection\", which\ntemporarily reduces selection pressure on recently morphologically-changed\nindividuals, thus enabling evolution some time to \"readapt\" to the new\nmorphology with subsequent control policy mutations. We show the potential for\nthis method to avoid local optima and converge to similar highly fit\nmorphologies across widely varying initial conditions, while sustaining fitness\nimprovements further into optimization. While this technique is admittedly only\nthe first of many steps that must be taken to achieve scalable optimization of\nembodied machines, we hope that theoretical insight into the cause of\nevolutionary stagnation in current methods will help to enable the automation\nof robot design and behavioral training -- while simultaneously providing a\ntestbed to investigate the theory of embodied cognition.\n", "title": "Scalable Co-Optimization of Morphology and Control in Embodied Machines" }
prediction: null, prediction_agent: null, annotation: null, annotation_agent: null
multi_label: true, explanation: null, id: 13810, metadata: null
status: Default, event_timestamp: null, metrics: null

text: null
inputs:
{ "abstract": " It has been known since Ehrhard and Regnier's seminal work on the Taylor\nexpansion of {\\lambda}-terms that this operation commutes with normalization:\nthe expansion of a {\\lambda}-term is always normalizable and its normal form is\nthe expansion of the Böhm tree of the term. We generalize this result to the\nnon-uniform setting of the algebraic {\\lambda}-calculus, i.e.\n{\\lambda}-calculus extended with linear combinations of terms. This requires us\nto tackle two difficulties: foremost is the fact that Ehrhard and Regnier's\ntechniques rely heavily on the uniform, deterministic nature of the ordinary\n{\\lambda}-calculus, and thus cannot be adapted; second is the absence of any\nsatisfactory generic extension of the notion of Böhm tree in presence of\nquantitative non-determinism, which is reflected by the fact that the Taylor\nexpansion of an algebraic {\\lambda}-term is not always normalizable. Our\nsolution is to provide a fine grained study of the dynamics of\n{\\beta}-reduction under Taylor expansion, by introducing a notion of reduction\non resource vectors, i.e. infinite linear combinations of resource\n{\\lambda}-terms. The latter form the multilinear fragment of the differential\n{\\lambda}-calculus, and resource vectors are the target of the Taylor expansion\nof {\\lambda}-terms. We show the reduction of resource vectors contains the\nimage of any {\\beta}-reduction step, from which we deduce that Taylor expansion\nand normalization commute on the nose. We moreover identify a class of\nalgebraic {\\lambda}-terms, encompassing both normalizable algebraic\n{\\lambda}-terms and arbitrary ordinary {\\lambda}-terms: the expansion of these\nis always normalizable, which guides the definition of a generalization of\nBöhm trees to this setting.\n", "title": "Normalizing the Taylor expansion of non-deterministic λ-terms, via parallel reduction of resource vectors" }
prediction: null, prediction_agent: null, annotation: [ "Computer Science" ], annotation_agent: null
multi_label: true, explanation: null, id: 13811, metadata: null
status: Validated, event_timestamp: null, metrics: null

text: null
inputs:
{ "abstract": " We propose and apply two methods to estimate pupil plane phase\ndiscontinuities for two realistic scenarios on VLT and Keck. The methods use\nboth Phase Diversity and a form of image sharpening. For the case of VLT, we\nsimulate the `low wind effect' (LWE) which is responsible for focal plane\nerrors in the SPHERE system in low wind and good seeing conditions. We\nsuccessfully estimate the simulated LWE using both methods, and show that they\nare complimentary to one another. We also demonstrate that single image Phase\nDiversity (also known as Phase Retrieval with diversity) is also capable of\nestimating the simulated LWE when using the natural de-focus on the SPHERE/DTTS\nimager. We demonstrate that Phase Diversity can estimate the LWE to within 30\nnm RMS WFE, which is within the allowable tolerances to achieve a target SPHERE\ncontrast of 10$^{-6}$. Finally, we simulate 153 nm RMS of piston errors on the\nmirror segments of Keck and produce NIRC2 images subject to these effects. We\nshow that a single, diverse image with 1.5 waves (PV) of focus can be used to\nestimate this error to within 29 nm RMS WFE, and a perfect correction of our\nestimation would increase the Strehl ratio of a NIRC2 image by 12\\%\n", "title": "Quantifying telescope phase discontinuities external to AO-systems by use of Phase Diversity and Focal Plane Sharpening" }
prediction: null, prediction_agent: null, annotation: [ "Physics" ], annotation_agent: null
multi_label: true, explanation: null, id: 13812, metadata: null
status: Validated, event_timestamp: null, metrics: null

text: null
inputs:
{ "abstract": " We analyse a multilevel Monte Carlo method for the approximation of\ndistribution functions of univariate random variables. Since, by assumption,\nthe target distribution is not known explicitly, approximations have to be\nused. We provide an asymptotic analysis of the error and the cost of the\nalgorithm. Furthermore we construct an adaptive version of the algorithm that\ndoes not require any a priori knowledge on weak or strong convergence rates. We\napply the adaptive algorithm to smooth path-independent and path-dependent\nfunctionals and to stopped exit times of SDEs.\n", "title": "Adaptive Multilevel Monte Carlo Approximation of Distribution Functions" }
prediction: null, prediction_agent: null, annotation: null, annotation_agent: null
multi_label: true, explanation: null, id: 13813, metadata: null
status: Default, event_timestamp: null, metrics: null

text: null
inputs:
{ "abstract": " We provide a fast method for computing constraints on impactor pre-impact\norbits, applying this to the late giant impacts in the Solar System. These\nconstraints can be used to make quick, broad comparisons of different collision\nscenarios, identifying some immediately as low-probability events, and\nnarrowing the parameter space in which to target follow-up studies with\nexpensive N-body simulations. We benchmark our parameter space predictions,\nfinding good agreement with existing N-body studies for the Moon. We suggest\nthat high-velocity impact scenarios in the inner Solar System, including all\ncurrently proposed single impact scenarios for the formation of Mercury, should\nbe disfavoured. This leaves a multiple hit-and-run scenario as the most\nprobable currently proposed for the formation of Mercury.\n", "title": "Constraints on the pre-impact orbits of Solar System giant impactors" }
prediction: null, prediction_agent: null, annotation: null, annotation_agent: null
multi_label: true, explanation: null, id: 13814, metadata: null
status: Default, event_timestamp: null, metrics: null

text: null
inputs:
{ "abstract": " We present a deep radio search in the Reticulum II dwarf spheroidal (dSph)\ngalaxy performed with the Australia Telescope Compact Array. Observations were\nconducted at 16 cm wavelength, with an rms sensitivity of 0.01 mJy/beam, and\nwith the goal of searching for synchrotron emission induced by annihilation or\ndecay of weakly interacting massive particles (WIMPs). Data were complemented\nwith observations on large angular scales taken with the KAT-7 telescope. We\nfind no evidence for a diffuse emission from the dSph and we derive competitive\nbounds on the WIMP properties. In addition, we detect more than 200 new\nbackground radio sources. Among them, we show there are two compelling\ncandidates for being the radio counterpart of the possible gamma-ray emission\nreported by other groups using Fermi-LAT data.\n", "title": "Dark matter in the Reticulum II dSph: a radio search" }
prediction: null, prediction_agent: null, annotation: [ "Physics" ], annotation_agent: null
multi_label: true, explanation: null, id: 13815, metadata: null
status: Validated, event_timestamp: null, metrics: null

text: null
inputs:
{ "abstract": " This paper investigates the problem of detecting relevant change points in\nthe mean vector, say $\\mu_t =(\\mu_{1,t},\\ldots ,\\mu_{d,t})^T$ of a high\ndimensional time series $(Z_t)_{t\\in \\mathbb{Z}}$.\nWhile the recent literature on testing for change points in this context\nconsiders hypotheses for the equality of the means $\\mu_h^{(1)}$ and\n$\\mu_h^{(2)}$ before and after the change points in the different components,\nwe are interested in a null hypothesis of the form $$ H_0: |\\mu^{(1)}_{h} -\n\\mu^{(2)}_{h} | \\leq \\Delta_h ~~~\\mbox{ for all } ~~h=1,\\ldots ,d $$ where\n$\\Delta_1, \\ldots , \\Delta_d$ are given thresholds for which a smaller\ndifference of the means in the $h$-th component is considered to be\nnon-relevant.\nWe propose a new test for this problem based on the maximum of squared and\nintegrated CUSUM statistics and investigate its properties as the sample size\n$n$ and the dimension $d$ both converge to infinity. In particular, using\nGaussian approximations for the maximum of a large number of dependent random\nvariables, we show that on certain points of the boundary of the null\nhypothesis a standardised version of the maximum converges weakly to a Gumbel\ndistribution.\n", "title": "Relevant change points in high dimensional time series" }
prediction: null, prediction_agent: null, annotation: [ "Mathematics", "Statistics" ], annotation_agent: null
multi_label: true, explanation: null, id: 13816, metadata: null
status: Validated, event_timestamp: null, metrics: null

text: null
inputs:
{ "abstract": " Learning representations that disentangle the underlying factors of\nvariability in data is an intuitive precursor to AI with human-like reasoning.\nConsequently, it has been the object of many efforts of the machine learning\ncommunity. This work takes a step further in this direction by addressing the\nscenario where generative factors present a multimodal distribution due to the\nexistence of class distinction in the data. We formulate a lower bound on the\njoint distribution of inputs and class labels and present N-VAE, a model which\nis capable of separating factors of variation which are exclusive to certain\nclasses from factors that are shared among classes. This model implements the\nnatural clustering prior through the use of a class-conditioned latent space\nand a shared latent space. We show its usefulness for detecting and\ndisentangling class-dependent generative factors as well as for generating rich\nartificial samples.\n", "title": "Disentangling in Variational Autoencoders with Natural Clustering" }
prediction: null, prediction_agent: null, annotation: [ "Computer Science", "Statistics" ], annotation_agent: null
multi_label: true, explanation: null, id: 13817, metadata: null
status: Validated, event_timestamp: null, metrics: null

text: null
inputs:
{ "abstract": " Excellent ranking power along with well calibrated probability estimates are\nneeded in many classification tasks. In this paper, we introduce a technique,\nCalibrated Boosting-Forest that captures both. This novel technique is an\nensemble of gradient boosting machines that can support both continuous and\nbinary labels. While offering superior ranking power over any individual\nregression or classification model, Calibrated Boosting-Forest is able to\npreserve well calibrated posterior probabilities. Along with these benefits, we\nprovide an alternative to the tedious step of tuning gradient boosting\nmachines. We demonstrate that tuning Calibrated Boosting-Forest can be reduced\nto a simple hyper-parameter selection. We further establish that increasing\nthis hyper-parameter improves the ranking performance under a diminishing\nreturn. We examine the effectiveness of Calibrated Boosting-Forest on\nligand-based virtual screening where both continuous and binary labels are\navailable and compare the performance of Calibrated Boosting-Forest with\nlogistic regression, gradient boosting machine and deep learning. Calibrated\nBoosting-Forest achieved an approximately 48% improvement compared to a\nstate-of-art deep learning model. Moreover, it achieved around 95% improvement\non probability quality measurement compared to the best individual gradient\nboosting machine. Calibrated Boosting-Forest offers a benchmark demonstration\nthat in the field of ligand-based virtual screening, deep learning is not the\nuniversally dominant machine learning model and good calibrated probabilities\ncan better facilitate virtual screening process.\n", "title": "Calibrated Boosting-Forest" }
prediction: null, prediction_agent: null, annotation: null, annotation_agent: null
multi_label: true, explanation: null, id: 13818, metadata: null
status: Default, event_timestamp: null, metrics: null

text: null
inputs:
{ "abstract": " The low-temperature magnetic phases in the layered honeycomb lattice material\n$\\alpha$-RuCl$_3$ have been studied as a function of in-plane magnetic field.\nIn zero field this material orders magnetically below 7 K with so-called zigzag\norder within the honeycomb planes. Neutron diffraction data show that a\nrelatively small applied field of 2 T is sufficient to suppress the population\nof the magnetic domain in which the zigzag chains run along the field\ndirection. We found that the intensity of the magnetic peaks due to zigzag\norder is continuously suppressed with increasing field until their\ndisappearance at $\\mu_o$H$_c$=8 T. At still higher fields (above 8 T) the\nzigzag order is destroyed, while bulk magnetization and heat capacity\nmeasurements suggest that the material enters a state with gapped magnetic\nexcitations. We discuss the magnetic phase diagram obtained in our study in the\ncontext of a quantum phase transition.\n", "title": "Phase Diagram of $α$-RuCl$_3$ in an in-plane Magnetic Field" }
prediction: null, prediction_agent: null, annotation: null, annotation_agent: null
multi_label: true, explanation: null, id: 13819, metadata: null
status: Default, event_timestamp: null, metrics: null

text: null
inputs:
{ "abstract": " We introduce a general methodology for post hoc inference in a large-scale\nmultiple testing framework. The approach is called \"user-agnostic\" in the sense\nthat the statistical guarantee on the number of correct rejections holds for\nany set of candidate items selected by the user (after having seen the data).\nThis task is investigated by defining a suitable criterion, named the\njoint-family-wise-error rate (JER for short). We propose several procedures for\ncontrolling the JER, with a special focus on incorporating dependencies while\nadapting to the unknown quantity of signal (via a step-down approach). We show\nthat our proposed setting incorporates as particular cases a version of the\nhigher criticism as well as the closed testing based approach of Goeman and\nSolari (2011). Our theoretical statements are supported by numerical\nexperiments.\n", "title": "Post hoc inference via joint family-wise error rate control" }
prediction: null, prediction_agent: null, annotation: null, annotation_agent: null
multi_label: true, explanation: null, id: 13820, metadata: null
status: Default, event_timestamp: null, metrics: null

text: null
inputs:
{ "abstract": " We design jamming resistant receivers to enhance the robustness of a massive\nMIMO uplink channel against jamming. In the pilot phase, we estimate not only\nthe desired channel, but also the jamming channel by exploiting purposely\nunused pilot sequences. The jamming channel estimate is used to construct the\nlinear receive filter to reduce impact that jamming has on the achievable\nrates. The performance of the proposed scheme is analytically and numerically\nevaluated. These results show that the proposed scheme greatly improves the\nrates, as compared to conventional receivers. Moreover, the proposed schemes\nstill work well with stronger jamming power.\n", "title": "Jamming Resistant Receivers for Massive MIMO" }
prediction: null, prediction_agent: null, annotation: [ "Computer Science" ], annotation_agent: null
multi_label: true, explanation: null, id: 13821, metadata: null
status: Validated, event_timestamp: null, metrics: null

text: null
inputs:
{ "abstract": " We discuss higher dimensional generalizations of the 0+1-dimensional\nSachdev-Ye-Kitaev (SYK) model that has recently become the focus of intensive\ninterdisciplinary studies by, both, the condensed matter and field-theoretical\ncommunities. Unlike the previous constructions where multiple SYK copies would\nbe coupled to each other and/or hybridized with itinerant fermions via\nspatially short-ranged random hopping processes, we study algebraically varying\nlong-range (spatially and/or temporally) correlated random couplings in the\ngeneral d+1 dimensions. Such pertinent topics as translationally-invariant\nstrong-coupling solutions, emergent reparametrization symmetry, effective\naction for fluctuations, chaotic behavior, and diffusive transport (or a lack\nthereof) are all addressed. We find that the most appealing properties of the\noriginal SYK model that suggest the existence of its 1+1-dimensional\nholographic gravity dual do not survive the aforementioned generalizations,\nthus lending no additional support to the hypothetical broad (including\n'non-AdS/non-CFT') holographic correspondence.\n", "title": "Thickening and sickening the SYK model" }
prediction: null, prediction_agent: null, annotation: null, annotation_agent: null
multi_label: true, explanation: null, id: 13822, metadata: null
status: Default, event_timestamp: null, metrics: null

text: null
inputs:
{ "abstract": " Realistic implementations of the Kitaev chain require, in general, the\nintroduction of extra internal degrees of freedom. In the present work, we\ndiscuss the presence of hidden BDI symmetries for free Hamiltonians describing\nsystems with an arbitrary number of internal degrees of freedom. We generalize\nresults of a spinfull Kitaev chain to construct a Hamiltonian with $n$ internal\ndegrees of freedom and obtain the corresponding hidden chiral symmetry. As an\nexplicit application of this generalized result, we exploit by analytical and\nnumerical calculations the case of a spinful 2-band Kitaev chain, which can\nhost up to 4 Majorana bound states. We also observe the appearence of minigap\nstates, when chiral symmetry is broken.\n", "title": "Hidden chiral symmetries in BDI multichannel Kitaev chains" }
prediction: null, prediction_agent: null, annotation: null, annotation_agent: null
multi_label: true, explanation: null, id: 13823, metadata: null
status: Default, event_timestamp: null, metrics: null

text: null
inputs:
{ "abstract": " A half-century after the discovery of the superconductor-insulator transition\n(SIT), one of the fundamental predictions of the theory, the charge\nBerezinskii-Kosterlitz-Thouless (BKT) transition that is expected to occur at\nthe insulating side of the SIT, has remained unobserved. The charge BKT\ntransition is a phenomenon dual to the vortex BKT transition, which is at the\nheart of the very existence of two-dimensional superconductivity as a\nzero-resistance state appearing at finite temperatures. The dual picture points\nto the possibility of the existence of a superinsulating state endowed with\nzero conductance at finite temperature. Here, we report the observation of the\ncharge BKT transition on the insulating side of the SIT, identified by the\ncritical behavior of the resistance. We find that the critical temperature of\nthe charge BKT transition depends on the magnetic field exhibiting first the\nfast growth and then passing through the maximum at fields much less than the\nupper critical field. Finally, we ascertain the effects of the finite\nelectrostatic screening length and its divergence at the magnetic field-tuned\napproach to the superconductor-insulator transition.\n", "title": "Charge Berezinskii-Kosterlitz-Thouless transition in superconducting NbTiN films" }
prediction: null, prediction_agent: null, annotation: null, annotation_agent: null
multi_label: true, explanation: null, id: 13824, metadata: null
status: Default, event_timestamp: null, metrics: null

text: null
inputs:
{ "abstract": " Geo-social data has been an attractive source for a variety of problems such\nas mining mobility patterns, link prediction, location recommendation, and\ninfluence maximization. However, new geo-social data is increasingly\nunavailable and suffers several limitations. In this paper, we aim to remedy\nthe problem of effective data extraction from geo-social data sources. We first\nidentify and categorize the limitations of extracting geo-social data. In order\nto overcome the limitations, we propose a novel seed-driven approach that uses\nthe points of one source as the seed to feed as queries for the others. We\nadditionally handle differences between, and dynamics within the sources by\nproposing three variants for optimizing search radius. Furthermore, we provide\nan optimization based on recursive clustering to minimize the number of\nrequests and an adaptive procedure to learn the specific data distribution of\neach source. Our comprehensive experiments with six popular sources show that\nour seed-driven approach yields 14.3 times more data overall, while our\nrequest-optimized algorithm retrieves up to 95% of the data with less than 16%\nof the requests. Thus, our proposed seed-driven approach set new standards for\neffective and efficient extraction of geo-social data.\n", "title": "Seed-Driven Geo-Social Data Extraction - Full Version" }
prediction: null, prediction_agent: null, annotation: null, annotation_agent: null
multi_label: true, explanation: null, id: 13825, metadata: null
status: Default, event_timestamp: null, metrics: null

text: null
inputs:
{ "abstract": " The random-effects or normal-normal hierarchical model is commonly utilized\nin a wide range of meta-analysis applications. A Bayesian approach to inference\nis very attractive in this context, especially when a meta-analysis is based\nonly on few studies. The bayesmeta R package provides readily accessible tools\nto perform Bayesian meta-analyses and generate plots and summaries, without\nhaving to worry about computational details. It allows for flexible prior\nspecification and instant access to the resulting posterior distributions,\nincluding prediction and shrinkage estimation, and facilitating for example\nquick sensitivity checks. The present paper introduces the underlying theory\nand showcases its usage.\n", "title": "Bayesian random-effects meta-analysis using the bayesmeta R package" }
prediction: null, prediction_agent: null, annotation: null, annotation_agent: null
multi_label: true, explanation: null, id: 13826, metadata: null
status: Default, event_timestamp: null, metrics: null

text: null
inputs:
{ "abstract": " In this paper, we continue our previous work on the Dirichlet mixture model\n(DMM)-based VQ to derive the performance bound of the LSF VQ. The LSF\nparameters are transformed into the $\\Delta$LSF domain and the underlying\ndistribution of the $\\Delta$LSF parameters are modelled by a DMM with finite\nnumber of mixture components. The quantization distortion, in terms of the mean\nsquared error (MSE), is calculated with the high rate theory. The mapping\nrelation between the perceptually motivated log spectral distortion (LSD) and\nthe MSE is empirically approximated by a polynomial. With this mapping\nfunction, the minimum required bit rate for transparent coding of the LSF is\nestimated.\n", "title": "Dirichlet Mixture Model based VQ Performance Prediction for Line Spectral Frequency" }
prediction: null, prediction_agent: null, annotation: [ "Computer Science", "Statistics" ], annotation_agent: null
multi_label: true, explanation: null, id: 13827, metadata: null
status: Validated, event_timestamp: null, metrics: null

text: null
inputs:
{ "abstract": " We consider the problem of approximate $K$-means clustering with outliers and\nside information provided by same-cluster queries and possibly noisy answers.\nOur solution shows that, under some mild assumptions on the smallest cluster\nsize, one can obtain an $(1+\\epsilon)$-approximation for the optimal potential\nwith probability at least $1-\\delta$, where $\\epsilon>0$ and $\\delta\\in(0,1)$,\nusing an expected number of $O(\\frac{K^3}{\\epsilon \\delta})$ noiseless\nsame-cluster queries and comparison-based clustering of complexity $O(ndK +\n\\frac{K^3}{\\epsilon \\delta})$, here, $n$ denotes the number of points and $d$\nthe dimension of space. Compared to a handful of other known approaches that\nperform importance sampling to account for small cluster sizes, the proposed\nquery technique reduces the number of queries by a factor of roughly\n$O(\\frac{K^6}{\\epsilon^3})$, at the cost of possibly missing very small\nclusters. We extend this settings to the case where some queries to the oracle\nproduce erroneous information, and where certain points, termed outliers, do\nnot belong to any clusters. Our proof techniques differ from previous methods\nused for $K$-means clustering analysis, as they rely on estimating the sizes of\nthe clusters and the number of points needed for accurate centroid estimation\nand subsequent nontrivial generalizations of the double Dixie cup problem. We\nillustrate the performance of the proposed algorithm both on synthetic and real\ndatasets, including MNIST and CIFAR $10$.\n", "title": "Query K-means Clustering and the Double Dixie Cup Problem" }
prediction: null, prediction_agent: null, annotation: null, annotation_agent: null
multi_label: true, explanation: null, id: 13828, metadata: null
status: Default, event_timestamp: null, metrics: null

text: null
inputs:
{ "abstract": " Here, we suggest a method to represent general directed uniform and\nnon-uniform hypergraphs by different connectivity tensors. We show many results\non spectral properties of undirected hypergraphs also hold for general directed\nuniform hypergraphs. Our representation of a connectivity tensor will be very\nuseful for the further development in spectral theory of directed hypergraphs.\nAt the end, we have also introduced the concept of weak* irreducible\nhypermatrix to better explain connectivity of a directed hypergraph.\n", "title": "On the spectrum of directed uniform and non-uniform hypergraphs" }
prediction: null, prediction_agent: null, annotation: null, annotation_agent: null
multi_label: true, explanation: null, id: 13829, metadata: null
status: Default, event_timestamp: null, metrics: null

text: null
inputs:
{ "abstract": " We introduce uncertainty regions to perform inference on partial correlations\nwhen data are missing not at random. These uncertainty regions are shown to\nhave a desired asymptotic coverage. Their finite sample performance is\nillustrated via simulations and real data example.\n", "title": "Inference for partial correlation when data are missing not at random" }
prediction: null, prediction_agent: null, annotation: null, annotation_agent: null
multi_label: true, explanation: null, id: 13830, metadata: null
status: Default, event_timestamp: null, metrics: null

text: null
inputs:
{ "abstract": " Interstitial content is online content which grays out, or otherwise obscures\nthe main page content. In this technical report, we discuss exploratory\nresearch into detecting the presence of interstitial content in web pages. We\ndiscuss the use of computer vision techniques to detect interstitials, and the\npotential use of these techniques to provide a labelled dataset for machine\nlearning.\n", "title": "Interstitial Content Detection" }
prediction: null, prediction_agent: null, annotation: null, annotation_agent: null
multi_label: true, explanation: null, id: 13831, metadata: null
status: Default, event_timestamp: null, metrics: null

text: null
inputs:
{ "abstract": " Spiking neuronal networks are usually simulated with three main simulation\nschemes: the classical time-driven and event-driven schemes, and the more\nrecent hybrid scheme. All three schemes evolve the state of a neuron through a\nseries of checkpoints: equally spaced in the first scheme and determined\nneuron-wise by spike events in the latter two. The time-driven and the hybrid\nscheme determine whether the membrane potential of a neuron crosses a threshold\nat the end of of the time interval between consecutive checkpoints. Threshold\ncrossing can, however, occur within the interval even if this test is negative.\nSpikes can therefore be missed. The present work derives, implements, and\nbenchmarks a method for perfect retrospective spike detection. This method can\nbe applied to neuron models with affine or linear subthreshold dynamics. The\nidea behind the method is to propagate the threshold with a time-inverted\ndynamics, testing whether the threshold crosses the neuron state to be evolved,\nrather than vice versa. Algebraically this translates into a set of\ninequalities necessary and sufficient for threshold crossing. This test is\nslower than the imperfect one, but faster than an alternative perfect tests\nbased on bisection or root-finding methods. Comparison confirms earlier results\nthat the imperfect test rarely misses spikes (less than a fraction $1/10^8$ of\nmissed spikes) in biologically relevant settings. This study offers an\nalternative geometric point of view on neuronal dynamics.\n", "title": "Perfect spike detection via time reversal" }
prediction: null, prediction_agent: null, annotation: null, annotation_agent: null
multi_label: true, explanation: null, id: 13832, metadata: null
status: Default, event_timestamp: null, metrics: null

text: null
inputs:
{ "abstract": " A press release from the National Institute of Standards and Technology\n(NIST)could potentially impede progress toward improving the analysis of\nforensic evidence and the presentation of forensic analysis results in courts\nin the United States and around the world. \"NIST experts urge caution in use of\ncourtroom evidence presentation method\" was released on October 12, 2017, and\nwas picked up by the phys.org news service. It argues that, except in\nexceptional cases, the results of forensic analyses should not be reported as\n\"likelihood ratios\". The press release, and the journal article by NIST\nresearchers Steven P. Lund & Harri Iyer on which it is based, identifies some\nlegitimate points of concern, but makes a strawman argument and reaches an\nunjustified conclusion that throws the baby out with the bathwater.\n", "title": "A response to: \"NIST experts urge caution in use of courtroom evidence presentation method\"" }
prediction: null, prediction_agent: null, annotation: null, annotation_agent: null
multi_label: true, explanation: null, id: 13833, metadata: null
status: Default, event_timestamp: null, metrics: null

text: null
inputs:
{ "abstract": " Reinforcement learning is a powerful paradigm for learning optimal policies\nfrom experimental data. However, to find optimal policies, most reinforcement\nlearning algorithms explore all possible actions, which may be harmful for\nreal-world systems. As a consequence, learning algorithms are rarely applied on\nsafety-critical systems in the real world. In this paper, we present a learning\nalgorithm that explicitly considers safety, defined in terms of stability\nguarantees. Specifically, we extend control-theoretic results on Lyapunov\nstability verification and show how to use statistical models of the dynamics\nto obtain high-performance control policies with provable stability\ncertificates. Moreover, under additional regularity assumptions in terms of a\nGaussian process prior, we prove that one can effectively and safely collect\ndata in order to learn about the dynamics and thus both improve control\nperformance and expand the safe region of the state space. In our experiments,\nwe show how the resulting algorithm can safely optimize a neural network policy\non a simulated inverted pendulum, without the pendulum ever falling down.\n", "title": "Safe Model-based Reinforcement Learning with Stability Guarantees" }
prediction: null, prediction_agent: null, annotation: null, annotation_agent: null
multi_label: true, explanation: null, id: 13834, metadata: null
status: Default, event_timestamp: null, metrics: null

text: null
inputs:
{ "abstract": " The CEGAR loop in software model checking notoriously diverges when the\nabstraction refinement procedure does not derive a loop invariant. An\nabstraction refinement procedure based on an SMT solver is applied to a trace,\ni.e., a restricted form of a program (without loops). In this paper, we present\na new abstraction refinement procedure that aims at circumventing this\nrestriction whenever possible. We apply abstract interpretation to a program\nthat we derive from the given trace. If the program contains a loop, we are\nguaranteed to obtain a loop invariant. We call an SMT solver only in the case\nwhere the abstract interpretation returns an indefinite answer. That is, the\nidea is to use abstract interpretation and an SMT solver in tandem. An\nexperimental evaluation in the setting of trace abstraction indicates the\npractical potential of this idea.\n", "title": "Refining Trace Abstraction using Abstract Interpretation" }
prediction: null, prediction_agent: null, annotation: null, annotation_agent: null
multi_label: true, explanation: null, id: 13835, metadata: null
status: Default, event_timestamp: null, metrics: null

text: null
inputs:
{ "abstract": " The Sturm-Liouville operator with singular potentials on the lasso graph is\nconsidered. We suppose that the potential is known a priori on the boundary\nedge, and recover the potential on the loop from a part of the spectrum and\nsome additional data. We prove the uniqueness theorem and provide a\nconstructive algorithm for the solution of this partial inverse problem.\n", "title": "A partial inverse problem for the Sturm-Liouville operator on the graph with a loop" }
prediction: null, prediction_agent: null, annotation: [ "Mathematics" ], annotation_agent: null
multi_label: true, explanation: null, id: 13836, metadata: null
status: Validated, event_timestamp: null, metrics: null

text: null
inputs:
{ "abstract": " We describe some progress towards a new common framework for model driven\nengineering, based on behavioral programming. The tool we have developed\nunifies almost all of the work done in behavioral programming so far, under a\ncommon set of interfaces. Its architecture supports pluggable event selection\nstrategies, which can make models more intuitive and compact. Program state\nspace can be traversed using various algorithms, such as DFS and A*.\nFurthermore, program state is represented in a way that enables scanning a\nstate space using parallel and distributed algorithms. Executable models\ncreated with this tool can be directly embedded in Java applications, enabling\na model-first approach to system engineering, where initially a model is\ncreated and verified, and then a working application is gradually built around\nthe model. The model itself consists of a collection of small scripts written\nin JavaScript (hence \"BPjs\"). Using a variety of case-studies, this paper shows\nhow the combination of a lenient programming language with formal model\nanalysis tools creates an efficient way of developing robust complex systems.\nAdditionally, as we learned from an experimental course we ran, the usage of\nJavaScript make practitioners more amenable to using this system and, thus,\nmodel checking and model driven engineering. In addition to providing\ninfrastructure for development and case-studies in behavioral programming, the\ntool is designed to serve as a common platform for research and innovation in\nbehavioral programming and in model driven engineering in general.\n", "title": "BPjs --- a framework for modeling reactive systems using a scripting language and BP" }
prediction: null, prediction_agent: null, annotation: null, annotation_agent: null
multi_label: true, explanation: null, id: 13837, metadata: null
status: Default, event_timestamp: null, metrics: null

text: null
inputs:
{ "abstract": " This work presents an algorithm to generate depth, quantum gate and qubit\noptimized circuits for $GF(2^m)$ squaring in the polynomial basis. Further, to\nthe best of our knowledge the proposed quantum squaring circuit algorithm is\nthe only work that considers depth as a metric to be optimized. We compared\ncircuits generated by our proposed algorithm against the state of the art and\ndetermine that they require $50 \\%$ fewer qubits and offer gates savings that\nrange from $37 \\%$ to $68 \\%$. Further, existing quantum exponentiation are\nbased on either modular or integer arithmetic. However, Galois arithmetic is a\nuseful tool to design resource efficient quantum exponentiation circuit\napplicable in quantum cryptanalysis. Therefore, we present the quantum circuit\nimplementation of Galois field exponentiation based on the proposed quantum\nGalois field squaring circuit. We calculated a qubit savings ranging between\n$44\\%$ to $50\\%$ and quantum gate savings ranging between $37 \\%$ to $68 \\%$\ncompared to identical quantum exponentiation circuit based on existing squaring\ncircuits.\n", "title": "Design of Quantum Circuits for Galois Field Squaring and Exponentiation" }
prediction: null, prediction_agent: null, annotation: [ "Computer Science" ], annotation_agent: null
multi_label: true, explanation: null, id: 13838, metadata: null
status: Validated, event_timestamp: null, metrics: null

text: null
inputs:
{ "abstract": " We examine the Bayes-consistency of a recently proposed\n1-nearest-neighbor-based multiclass learning algorithm. This algorithm is\nderived from sample compression bounds and enjoys the statistical advantages of\ntight, fully empirical generalization bounds, as well as the algorithmic\nadvantages of a faster runtime and memory savings. We prove that this algorithm\nis strongly Bayes-consistent in metric spaces with finite doubling dimension\n--- the first consistency result for an efficient nearest-neighbor sample\ncompression scheme. Rather surprisingly, we discover that this algorithm\ncontinues to be Bayes-consistent even in a certain infinite-dimensional\nsetting, in which the basic measure-theoretic conditions on which classic\nconsistency proofs hinge are violated. This is all the more surprising, since\nit is known that $k$-NN is not Bayes-consistent in this setting. We pose\nseveral challenging open problems for future research.\n", "title": "Nearest-Neighbor Sample Compression: Efficiency, Consistency, Infinite Dimensions" }
prediction: null, prediction_agent: null, annotation: null, annotation_agent: null
multi_label: true, explanation: null, id: 13839, metadata: null
status: Default, event_timestamp: null, metrics: null

text: null
inputs:
{ "abstract": " Adaptive gradient methods such as AdaGrad and its variants update the\nstepsize in stochastic gradient descent on the fly according to the gradients\nreceived along the way; such methods have gained widespread use in large-scale\noptimization for their ability to converge robustly, without the need to fine\ntune parameters such as the stepsize schedule. Yet, the theoretical guarantees\nto date for AdaGrad are for online and convex optimization, which is quite\ndifferent from the offline and nonconvex setting where adaptive gradient\nmethods shine in practice. We bridge this gap by providing strong theoretical\nguarantees in batch and stochastic setting, for the convergence of AdaGrad over\nsmooth, nonconvex landscapes, from any initialization of the stepsize, without\nknowledge of Lipschitz constant of the gradient. We show in the stochastic\nsetting that AdaGrad converges to a stationary point at the optimal\n$O(1/\\sqrt{N})$ rate (up to a $\\log(N)$ factor), and in the batch setting, at\nthe optimal $O(1/N)$ rate. Moreover, in both settings, the constant in the rate\nmatches the constant obtained as if the variance of the gradient noise and\nLipschitz constant of the gradient were known in advance and used to tune the\nstepsize, up to a logarithmic factor of the mismatch between the optimal\nstepsize and the stepsize used to initialize AdaGrad. In particular, our\nresults imply that AdaGrad is robust to both the unknown Lipschitz constant and\nlevel of stochastic noise on the gradient, in a near-optimal sense. When there\nis noise, AdaGrad converges at the rate of $O(1/\\sqrt{N})$ with well-tuned\nstepsize, and when there is not noise, the same algorithm converges at the rate\nof $O(1/N)$ like well-tuned batch gradient descent.\n", "title": "AdaGrad stepsizes: Sharp convergence over nonconvex landscapes, from any initialization" }
prediction: null, prediction_agent: null, annotation: null, annotation_agent: null
multi_label: true, explanation: null, id: 13840, metadata: null
status: Default, event_timestamp: null, metrics: null

text: null
inputs:
{ "abstract": " An infinitely smooth convex body in $\\mathbb R^n$ is called polynomially\nintegrable of degree $N$ if its parallel section functions are polynomials of\ndegree $N$. We prove that the only smooth convex bodies with this property in\nodd dimensions are ellipsoids, if $N\\ge n-1$. This is in contrast with the case\nof even dimensions and the case of odd dimensions with $N<n-1$, where such\nbodies do not exist, as it was recently shown by Agranovsky.\n", "title": "On polynomially integrable convex bodies" }
prediction: null, prediction_agent: null, annotation: null, annotation_agent: null
multi_label: true, explanation: null, id: 13841, metadata: null
status: Default, event_timestamp: null, metrics: null

text: null
inputs:
{ "abstract": " Motivated by wide-ranging applications such as video delivery over networks\nusing Multiple Description Codes, congestion control, and inventory management,\nwe study the state-tracking of a Markovian random process with a known\ntransition matrix and a finite ordered state set. The decision-maker must\nselect a state as an action at each time step to minimize the total expected\ncost. The decision-maker is faced with asymmetries both in cost and\nobservation: in case the selected state is less than the actual state of the\nMarkovian process, an under-utilization cost occurs and only partial\nobservation about the actual state is revealed; otherwise, the decision incurs\nan over-utilization cost and reveals full information about the actual state.\nWe can formulate this problem as a Partially Observable Markov Decision Process\nwhich can be expressed as a dynamic program based on the last full observed\nstate and the time of full observation. This formulation determines the\nsequence of actions to be taken between any two consecutive full observations\nof the actual state. However, this DP grows exponentially in the number of\nstates, with little hope for a computationally feasible solution. We present an\ninteresting class of computationally tractable policies with a percentile\nstructure. A generalization of binary search, this class of policies attempt at\nany given time to reduce the uncertainty by a given percentage. Among all\npercentile policies, we search for the one with the minimum expected cost. The\nresult of this search is a heuristic policy which we evaluate through numerical\nsimulations. We show that it outperforms the myopic policies and under some\nconditions performs close to the optimal policies. Furthermore, we derive a\nlower bound on the cost of the optimal policy which can be computed with low\ncomplexity and give a measure for how close our heuristic policy is to the\noptimal policy.\n", "title": "Percentile Policies for Tracking of Markovian Random Processes with Asymmetric Cost and Observation" }
prediction: null, prediction_agent: null, annotation: null, annotation_agent: null
multi_label: true, explanation: null, id: 13842, metadata: null
status: Default, event_timestamp: null, metrics: null

text: null
inputs:
{ "abstract": " In this paper, we present a novel approach to identify the generators and\nstates responsible for the small-signal stability of power networks. To this\nend, the newly developed notion of information transfer between the states of a\ndynamical system is used. In particular, using the concept of information\ntransfer, which characterizes influence between the various states and a linear\ncombination of states of a dynamical system, we identify the generators and\nstates which are responsible for causing instability of the power network.\nWhile characterizing influence from state to state, information transfer can\nalso describe influence from state to modes thereby generalizing the well-known\nnotion of participation factor while at the same time overcoming some of the\nlimitations of the participation factor. The developed framework is applied to\nstudy the three bus system identifying various cause of instabilities in the\nsystem. The simulation study is extended to IEEE 39 bus system.\n", "title": "On Information Transfer Based Characterization of Power System Stability" }
prediction: null, prediction_agent: null, annotation: null, annotation_agent: null
multi_label: true, explanation: null, id: 13843, metadata: null
status: Default, event_timestamp: null, metrics: null

text: null
inputs:
{ "abstract": " The classes of depth-bounded and name-bounded processes are fragments of the\npi-calculus for which some of the decision problems that are undecidable for\nthe full calculus become decidable. P is depth-bounded at level k if every\nreduction sequence for P contains successor processes with at most k active\nnested restrictions. P is name-bounded at level k if every reduction sequence\nfor P contains successor processes with at most k active bound names.\nMembership of these classes of processes is undecidable. In this paper we use\nbinary session types to decise two type systems that give a sound\ncharacterization of the properties: If a process is well-typed in our first\nsystem, it is depth-bounded. If a process is well-typed in our second, more\nrestrictive type system, it will also be name-bounded.\n", "title": "Using Session Types for Reasoning About Boundedness in the Pi-Calculus" }
prediction: null, prediction_agent: null, annotation: null, annotation_agent: null
multi_label: true, explanation: null, id: 13844, metadata: null
status: Default, event_timestamp: null, metrics: null

text: null
inputs:
{ "abstract": " The secretary problem is a classic model for online decision making.\nRecently, combinatorial extensions such as matroid or matching secretary\nproblems have become an important tool to study algorithmic problems in dynamic\nmarkets. Here the decision maker must know the numerical value of each arriving\nelement, which can be a demanding informational assumption. In this paper, we\ninitiate the study of combinatorial secretary problems with ordinal\ninformation, in which the decision maker only needs to be aware of a preference\norder consistent with the values of arrived elements. The goal is to design\nonline algorithms with small competitive ratios.\nFor a variety of combinatorial problems, such as bipartite matching, general\npacking LPs, and independent set with bounded local independence number, we\ndesign new algorithms that obtain constant competitive ratios.\nFor the matroid secretary problem, we observe that many existing algorithms\nfor special matroid structures maintain their competitive ratios even in the\nordinal model. In these cases, the restriction to ordinal information does not\nrepresent any additional obstacle. Moreover, we show that ordinal variants of\nthe submodular matroid secretary problems can be solved using algorithms for\nthe linear versions by extending [Feldman and Zenklusen, 2015]. In contrast, we\nprovide a lower bound of $\\Omega(\\sqrt{n}/(\\log n))$ for algorithms that are\noblivious to the matroid structure, where $n$ is the total number of elements.\nThis contrasts an upper bound of $O(\\log n)$ in the cardinal model, and it\nshows that the technique of thresholding is not sufficient for good algorithms\nin the ordinal model.\n", "title": "Combinatorial Secretary Problems with Ordinal Information" }
prediction: null, prediction_agent: null, annotation: null, annotation_agent: null
multi_label: true, explanation: null, id: 13845, metadata: null
status: Default, event_timestamp: null, metrics: null

text: null
inputs:
{ "abstract": " In order to understand the formation of social conventions we need to know\nthe specific role of control and learning in multi-agent systems. To advance in\nthis direction, we propose, within the framework of the Distributed Adaptive\nControl (DAC) theory, a novel Control-based Reinforcement Learning architecture\n(CRL) that can account for the acquisition of social conventions in multi-agent\npopulations that are solving a benchmark social decision-making problem. Our\nnew CRL architecture, as a concrete realization of DAC multi-agent theory,\nimplements a low-level sensorimotor control loop handling the agent's reactive\nbehaviors (pre-wired reflexes), along with a layer based on model-free\nreinforcement learning that maximizes long-term reward. We apply CRL in a\nmulti-agent game-theoretic task in which coordination must be achieved in order\nto find an optimal solution. We show that our CRL architecture is able to both\nfind optimal solutions in discrete and continuous time and reproduce human\nexperimental data on standard game-theoretic metrics such as efficiency in\nacquiring rewards, fairness in reward distribution and stability of convention\nformation.\n", "title": "Modeling the Formation of Social Conventions in Multi-Agent Populations" }
prediction: null, prediction_agent: null, annotation: null, annotation_agent: null
multi_label: true, explanation: null, id: 13846, metadata: null
status: Default, event_timestamp: null, metrics: null

text: null
inputs:
{ "abstract": " In this series of papers, we develop the theory of a class of locally compact\nquantum groupoids, which is motivated by the purely algebraic notion of weak\nmultiplier Hopf algebras. In this Part I, we provide motivation and formulate\nthe definition in the C*-algebra framework. Existence of a certain canonical\nidempotent element is required and it plays a fundamental role, including the\nestablishment of the coassociativity of the comultiplication. This class\ncontains locally compact quantum groups as a subclass.\n", "title": "A class of C*-algebraic locally compact quantum groupoids Part I. Motivation and definition" }
prediction: null, prediction_agent: null, annotation: null, annotation_agent: null
multi_label: true, explanation: null, id: 13847, metadata: null
status: Default, event_timestamp: null, metrics: null

text: null
inputs:
{ "abstract": " We prove a Lieb-Schultz-Mattis theorem for the quantum spin Hall effect\n(QSHE) in two-dimensional $\\pi$-flux models. In the presence of time reversal,\n$U(1)$ charge conservation and magnetic translation (with $\\pi$-flux per unit\ncell) symmetries, if a generic interacting Hamiltonian has a unique gapped\nsymmetric ground state at half filling (i.e. an odd number of electrons per\nunit cell), it can only be a QSH insulator. In other words, a trivial Mott\ninsulator is forbidden by symmetries at half filling. We further show that such\na symmetry-enforced QSHE can be realized in cold atoms, by shaking an optical\nlattice and applying a time-dependent Zeeman field.\n", "title": "Symmetry-enforced quantum spin Hall insulators in $π$-flux models" }
prediction: null, prediction_agent: null, annotation: null, annotation_agent: null
multi_label: true, explanation: null, id: 13848, metadata: null
status: Default, event_timestamp: null, metrics: null

text: null
inputs:
{ "abstract": " We present a stabilized microwave-frequency transfer technique that is based\non optical phase-sensing and optical phase-actuation. This technique shares\nseveral attributes with optical-frequency transfer and therefore exhibits\nseveral advantages over other microwave-frequency transfer techniques. We\ndemonstrated stabilized transfer of an 8,000 MHz microwave-frequency signal\nover a 166 km metropolitan optical fiber network, achieving a fractional\nfrequency stability of 6.8x10^-14 Hz/Hz at 1 s integration, and 5.0x10^-16\nHz/Hz at 1.6x10^4 s. This technique is being considered for use on the Square\nKilometre Array SKA1-mid radio telescope.\n", "title": "Stabilized microwave-frequency transfer using optical phase sensing and actuation" }
null
null
null
null
true
null
13849
null
Default
null
null
null
{ "abstract": " We define and study a numerical-range analogue of the notion of spectral set.\nAmong the results obtained are a positivity criterion and a dilation theorem,\nanalogous to those already known for spectral sets. An important difference\nfrom the classical definition is the role played in the new definition by the\nbase point. We present some examples to illustrate this aspect.\n", "title": "Spectral sets for numerical range" }
null
null
null
null
true
null
13850
null
Default
null
null
null
{ "abstract": " It is of fundamental importance to find algorithms obtaining optimal\nperformance for learning of statistical models in distributed and communication\nlimited systems. Aiming at characterizing the optimal strategies, we consider\nlearning of Gaussian Processes (GPs) in distributed systems as a pivotal\nexample. We first address a very basic problem: how many bits are required to\nestimate the inner-products of Gaussian vectors across distributed machines?\nUsing information theoretic bounds, we obtain an optimal solution for the\nproblem which is based on vector quantization. Two suboptimal and more\npractical schemes are also presented as substitute for the vector quantization\nscheme. In particular, it is shown that the performance of one of the practical\nschemes which is called per-symbol quantization is very close to the optimal\none. Schemes provided for the inner-product calculations are incorporated into\nour proposed distributed learning methods for GPs. Experimental results show\nthat with spending few bits per symbol in our communication scheme, our\nproposed methods outperform previous zero rate distributed GP learning schemes\nsuch as Bayesian Committee Model (BCM) and Product of experts (PoE).\n", "title": "Learning of Gaussian Processes in Distributed and Communication Limited Systems" }
null
null
null
null
true
null
13851
null
Default
null
null
null
{ "abstract": " Although deep learning has historical roots going back decades, neither the\nterm \"deep learning\" nor the approach was popular just over five years ago,\nwhen the field was reignited by papers such as Krizhevsky, Sutskever and\nHinton's now classic (2012) deep network model of Imagenet. What has the field\ndiscovered in the five subsequent years? Against a background of considerable\nprogress in areas such as speech recognition, image recognition, and game\nplaying, and considerable enthusiasm in the popular press, I present ten\nconcerns for deep learning, and suggest that deep learning must be supplemented\nby other techniques if we are to reach artificial general intelligence.\n", "title": "Deep Learning: A Critical Appraisal" }
null
null
null
null
true
null
13852
null
Default
null
null
null
{ "abstract": " Social media is an useful platform to share health-related information due to\nits vast reach. This makes it a good candidate for public-health monitoring\ntasks, specifically for pharmacovigilance. We study the problem of extraction\nof Adverse-Drug-Reaction (ADR) mentions from social media, particularly from\ntwitter. Medical information extraction from social media is challenging,\nmainly due to short and highly information nature of text, as compared to more\ntechnical and formal medical reports.\nCurrent methods in ADR mention extraction relies on supervised learning\nmethods, which suffers from labeled data scarcity problem. The State-of-the-art\nmethod uses deep neural networks, specifically a class of Recurrent Neural\nNetwork (RNN) which are Long-Short-Term-Memory networks (LSTMs)\n\\cite{hochreiter1997long}. Deep neural networks, due to their large number of\nfree parameters relies heavily on large annotated corpora for learning the end\ntask. But in real-world, it is hard to get large labeled data, mainly due to\nheavy cost associated with manual annotation. Towards this end, we propose a\nnovel semi-supervised learning based RNN model, which can leverage unlabeled\ndata also present in abundance on social media. Through experiments we\ndemonstrate the effectiveness of our method, achieving state-of-the-art\nperformance in ADR mention extraction.\n", "title": "Semi-Supervised Recurrent Neural Network for Adverse Drug Reaction Mention Extraction" }
null
null
null
null
true
null
13853
null
Default
null
null
null
{ "abstract": " This paper introduces a novel deep learning framework including a\nlexicon-based approach for sentence-level prediction of sentiment label\ndistribution. We propose to first apply semantic rules and then use a Deep\nConvolutional Neural Network (DeepCNN) for character-level embeddings in order\nto increase information for word-level embedding. After that, a Bidirectional\nLong Short-Term Memory Network (Bi-LSTM) produces a sentence-wide feature\nrepresentation from the word-level embedding. We evaluate our approach on three\nTwitter sentiment classification datasets. Experimental results show that our\nmodel can improve the classification accuracy of sentence-level sentiment\nanalysis in Twitter social networking.\n", "title": "A Deep Neural Architecture for Sentence-level Sentiment Classification in Twitter Social Networking" }
null
null
null
null
true
null
13854
null
Default
null
null
null
{ "abstract": " Net Asset Value (NAV) calculation and validation is the principle task of a\nfund administrator. If the NAV of a fund is calculated incorrectly then there\nis huge impact on the fund administrator; such as monetary compensation,\nreputational loss, or loss of business. In general, these companies use the\nsame methodology to calculate the NAV of a fund, however the type of fund in\nquestion dictates the set of business rules used to validate this. Today, most\nFund Administrators depend heavily on human resources due to the lack of an\nautomated standardized solutions, however due to economic climate and the need\nfor efficiency and costs reduction many banks are now looking for an automated\nsolution with minimal human interaction; i.e., straight through processing\n(STP). Within the scope of a collaboration project that focuses on building an\noptimal solution for NAV validation, in this paper, we will present a new\napproach for detecting correlated business rules. We also show how we evaluate\nthis approach using real-world financial data.\n", "title": "A Tree-based Approach for Detecting Redundant Business Rules in very Large Financial Datasets" }
null
null
null
null
true
null
13855
null
Default
null
null
null
{ "abstract": " The nonlinear Klein-Gordon (NLKG) equation on a manifold $M$ in the\nnonrelativistic limit, namely as the speed of light $c$ tends to infinity, is\nconsidered. In particular, a higher-order normalized approximation of NLKG\n(which corresponds to the NLS at order $r=1$) is constructed, and when $M$ is a\nsmooth compact manifold or $\\mathbb{R}^d$ it is proved that the solution of the\napproximating equation approximates the solution of the NLKG locally uniformly\nin time. When $M=\\mathbb{R}^d$, $d \\geq 3$, it is proved that solutions of the\nlinearized order $r$ normalized equation approximate solutions of linear\nKlein-Gordon equation up to times of order $\\mathcal{O}(c^{2(r-1)})$ for any\n$r>1$.\n", "title": "Dynamics of the nonlinear Klein-Gordon equation in the nonrelativistic limit, I" }
null
null
null
null
true
null
13856
null
Default
null
null
null
{ "abstract": " We review studies of superintense laser interaction with solid targets where\nthe generation of propagating surface plasmons (or surface waves) plays a key\nrole. These studies include the onset of plasma instabilities at the irradiated\nsurface, the enhancement of secondary emissions (protons, electrons, and\nphotons as high harmonics in the XUV range) in femtosecond interactions with\ngrating targets, and the generation of unipolar current pulses with picosecond\nduration. The experimental results give evidence of the existence of surface\nplasmons in the nonlinear regime of relativistic electron dynamics. These\nfindings open up a route to the improvement of ultrashort laser-driven sources\nof energetic radiation and, more in general, to the extension of plasmonics in\na high field regime.\n", "title": "Surface plasmons in superintense laser-solid interactions" }
null
null
null
null
true
null
13857
null
Default
null
null
null
{ "abstract": " Grasping skill is a major ability that a wide number of real-life\napplications require for robotisation. State-of-the-art robotic grasping\nmethods perform prediction of object grasp locations based on deep neural\nnetworks. However, such networks require huge amount of labeled data for\ntraining making this approach often impracticable in robotics. In this paper,\nwe propose a method to generate a large scale synthetic dataset with ground\ntruth, which we refer to as the Jacquard grasping dataset. Jacquard is built on\na subset of ShapeNet, a large CAD models dataset, and contains both RGB-D\nimages and annotations of successful grasping positions based on grasp attempts\nperformed in a simulated environment. We carried out experiments using an\noff-the-shelf CNN, with three different evaluation metrics, including real\ngrasping robot trials. The results show that Jacquard enables much better\ngeneralization skills than a human labeled dataset thanks to its diversity of\nobjects and grasping positions. For the purpose of reproducible research in\nrobotics, we are releasing along with the Jacquard dataset a web interface for\nresearchers to evaluate the successfulness of their grasping position\ndetections using our dataset.\n", "title": "Jacquard: A Large Scale Dataset for Robotic Grasp Detection" }
null
null
null
null
true
null
13858
null
Default
null
null
null
{ "abstract": " We compute the Hochschild cohomology ring of the algebras $A= k\\langle X,\nY\\rangle/ (X^a, XY-qYX, Y^a)$ over a field $k$ where $a\\geq 2$ and where $q\\in\nk$ is a primitive $a$-th root of unity. We find the the dimension of\n$\\mathrm{HH}^n(A)$ and show that it is independent of $a$. We compute\nexplicitly the ring structure of the even part of the Hochschild cohomology\nmodulo homogeneous nilpotent elements.\n", "title": "Hochschild cohomology of some quantum complete intersections" }
null
null
null
null
true
null
13859
null
Default
null
null
null
{ "abstract": " We provide complete source code for a front-end GUI and its back-end\ncounterpart for a stock market visualization tool. It is built based on the\n\"functional visualization\" concept we discuss, whereby functionality is not\nsacrificed for fancy graphics. The GUI, among other things, displays a\ncolor-coded signal (computed by the back-end code) based on how \"out-of-whack\"\neach stock is trading compared with its peers (\"mean-reversion\"), and the most\nsizable changes in the signal (\"momentum\"). The GUI also allows to efficiently\nfilter/tier stocks by various parameters (e.g., sector, exchange, signal,\nliquidity, market cap) and functionally display them. The tool can be run as a\nweb-based or local application.\n", "title": "Stock Market Visualization" }
null
null
null
null
true
null
13860
null
Default
null
null
null
{ "abstract": " The pioneering work of Brezis-Merle [7], Li-Shafrir [27], Li [26] and\nBartolucci-Tarantello [4] showed that any sequence of blow up solutions for\n(singular) mean field equations of Liouville type must exhibit a \"mass\nconcentration\" property. A typical situation of blow-up occurs when we let the\nsingular (vortex) points involved in the equation (see (1.1) below) collapse\ntogether. However in this case Lin-Tarantello in [30] pointed out that the\nphenomenon: \"bubbling implies mass concentration\" might not occur and new\nscenarios open for investigation. In this paper, we present two explicit\nexamples which illustrate (with mathematical rigor) how a \"non-concentration\"\nsituation does happen and its new features. Among other facts, we show that in\ncertain situations, the collapsing rate of the singularities can be used as\nblow up parameter to describe the bubbling properties of the solution-sequence.\nIn this way we are able to establish accurate estimates around the blow-up\npoints which we hope to use towards a degree counting formula for the shadow\nsystem (1.34) below.\n", "title": "Sharp estimates for solutions of mean field equation with collapsing singularity" }
null
null
null
null
true
null
13861
null
Default
null
null
null
{ "abstract": " We consider a scenario of broadcasting information over a network of nodes\nconnected by noiseless communication links. A source node in the network has\n$k$ data packets to broadcast, and it suffices that a large fraction of the\nnetwork nodes receives the broadcast. The source encodes the $k$ data packets\ninto $n \\ge k$ coded packets using a maximum distance separable (MDS) code, and\ntransmits them to its one-hop neighbours. Every other node in the network\nfollows a probabilistic forwarding protocol, in which it forwards a previously\nunreceived packet to all its neighbours with a certain probability $p$. A\n\"near-broadcast\" is when the expected fraction of nodes that receive at least\n$k$ of the $n$ coded packets is close to $1$. The forwarding probability $p$ is\nchosen so as to minimize the expected total number of transmissions needed for\na near-broadcast. In this paper, we analyze the probabilistic forwarding of\ncoded packets on two specific network topologies: binary trees and square\ngrids. For trees, our analysis shows that for fixed $k$, the expected total\nnumber of transmissions increases with $n$. On the other hand, on grids, we use\nideas from percolation theory to show that a judicious choice of $n$ will\nsignificantly reduce the expected total number of transmissions needed for a\nnear-broadcast.\n", "title": "Probabilistic Forwarding of Coded Packets on Networks" }
null
null
null
null
true
null
13862
null
Default
null
null
null
{ "abstract": " This paper contributes to the techniques of topo-algebraic recognition for\nlanguages beyond the regular setting as they relate to logic on words. In\nparticular, we provide a general construction on recognisers corresponding to\nadding one layer of various kinds of quantifiers and prove a related\nReutenauer-type theorem. Our main tools are codensity monads and duality\ntheory. Our construction hinges, in particular, on a measure-theoretic\ncharacterisation of the profinite monad of the free S-semimodule monad for\nfinite and commutative semirings S, which generalises our earlier insight that\nthe Vietoris monad on Boolean spaces is the codensity monad of the finite\npowerset functor.\n", "title": "Quantifiers on languages and codensity monads" }
null
null
null
null
true
null
13863
null
Default
null
null
null
{ "abstract": " In many practical problems, a learning agent may want to learn the best\naction in hindsight without ever taking a bad action, which is significantly\nworse than the default production action. In general, this is impossible\nbecause the agent has to explore unknown actions, some of which can be bad, to\nlearn better actions. However, when the actions are combinatorial, this may be\npossible if the unknown action can be evaluated by interleaving it with the\nproduction action. We formalize this concept as learning in stochastic\ncombinatorial semi-bandits with exchangeable actions. We design efficient\nlearning algorithms for this problem, bound their n-step regret, and evaluate\nthem on both synthetic and real-world problems. Our real-world experiments show\nthat our algorithms can learn to recommend K most attractive movies without\never violating a strict production constraint, both overall and subject to a\ndiversity constraint.\n", "title": "Conservative Exploration using Interleaving" }
null
null
[ "Statistics" ]
null
true
null
13864
null
Validated
null
null
null
{ "abstract": " This paper investigates power control and relay selection in Full Duplex\nCognitive Relay Networks (FDCRNs), where the secondary-user (SU) relays can\nsimultaneously receive data from the SU source and forward them to the SU\ndestination. We study both non-coherent and coherent scenarios. In the\nnon-coherent case, the SU relay forwards the signal from the SU source without\nregulating the phase; while in the coherent scenario, the SU relay regulates\nthe phase when forwarding the signal to minimize the interference at the\nprimary-user (PU) receiver. We consider the problem of maximizing the\ntransmission rate from the SU source to the SU destination subject to the\ninterference constraint at the PU receiver and power constraints at both the SU\nsource and SU relay. We then develop a mathematical model to analyze the data\nrate performance of the FDCRN considering the self-interference effects at the\nFD relay. We develop low-complexity and high-performance joint power control\nand relay selection algorithms. Extensive numerical results are presented to\nillustrate the impacts of power level parameters and the self-interference\ncancellation quality on the rate performance. Moreover, we demonstrate the\nsignificant gain of phase regulation at the SU relay.\n", "title": "Power Allocation for Full-Duplex Relay Selection in Underlay Cognitive Radio Networks: Coherent versus Non-Coherent Scenarios" }
null
null
null
null
true
null
13865
null
Default
null
null
null
{ "abstract": " We are working on a scalable, interactive visualization system, called\nCarina, for people to explore million-node graphs. By using latest web browser\ntechnologies, Carina offers fast graph rendering via WebGL, and works across\ndesktop (via Electron) and mobile platforms. Different from most existing graph\nvisualization tools, Carina does not store the full graph in RAM, enabling it\nto work with graphs with up to 69M edges. We are working to improve and\nopen-source Carina, to offer researchers and practitioners a new, scalable way\nto explore and visualize large graph datasets.\n", "title": "Carina: Interactive Million-Node Graph Visualization using Web Browser Technologies" }
null
null
null
null
true
null
13866
null
Default
null
null
null
{ "abstract": " In this note we determine all possible dominations between different products\nof manifolds, when none of the factors of the codomain is dominated by\nproducts. As a consequence, we determine the finiteness of every\nproduct-associated functorial semi-norm on the fundamental classes of the\naforementioned products. These results give partial answers to questions of M.\nGromov.\n", "title": "Domination between different products and finiteness of associated semi-norms" }
null
null
null
null
true
null
13867
null
Default
null
null
null
{ "abstract": " Consider a space X with the singular locus, Z=Sing(X), of positive dimension.\nSuppose both Z and X are locally complete intersections. The transversal type\nof X along Z is generically constant but at some points of Z it degenerates. We\nintroduce (under certain conditions) the discriminant of the transversal type,\na subscheme of Z, that reflects these degenerations whenever the generic\ntransversal type is `ordinary'.\nThe scheme structure of this discriminant is imposed by various compatibility\nproperties and is often non-reduced. We establish the basic properties of this\ndiscriminant: it is a Cartier divisor in Z, functorial under base change, flat\nunder some deformations of (X,Z), and compatible with pullback under some\nmorphisms, etc.\nFurthermore, we study the local geometry of this discriminant, e.g. we\ncompute its multiplicity at a point, and we obtain the resolution of its\nstructure sheaf (as module on Z) and study the locally defining equation.\n", "title": "Discriminant of the ordinary transversal singularity type. The local aspects" }
null
null
null
null
true
null
13868
null
Default
null
null
null
{ "abstract": " Based on recent high-resolution angle-resolved photoemission spectroscopy\nmeasurement in monolayer FeSe grown on SrTiO$_{3}$, we constructed a\ntight-binding model and proposed a superconducting (SC) pairing function which\ncan well fit the observed band structure and SC gap anisotropy. Then we\ninvestigated the spin excitation spectra in order to determine the possible\nsign structure of the SC order parameter. We found that a resonance-like spin\nexcitation may occur if the SC order parameter changes sign along the Fermi\nsurfaces. However, this resonance is located at different locations in momentum\nspace compared to other FeSe-based superconductors, suggesting that the Fermi\nsurface shape and pairing symmetry in monolayer FeSe grown on SrTiO$_{3}$ may\nbe different from other FeSe-based superconductors.\n", "title": "Possible spin excitation structure in monolayer FeSe grown on SrTiO$_{3}$" }
null
null
null
null
true
null
13869
null
Default
null
null
null
{ "abstract": " The renormalization method based on the Newton-Maclaurin expansion is applied\nto study the transient behavior of the solutions to the difference equations as\nthey tend to the steady-states. The key and also natural step is to make the\nrenormalization equations to be continuous such that the elementary functions\ncan be used to describe the transient behavior of the solutions to difference\nequations. As the concrete examples, we deal with the important second order\nnonlinear difference equations with a small parameter. The result shows that\nthe method is more natural than the multi-scale method.\n", "title": "Transient behavior of the solutions to the second order difference equations by the renormalization method based on Newton-Maclaurin expansion" }
null
null
null
null
true
null
13870
null
Default
null
null
null
{ "abstract": " We present a large-scale study of gender bias in occupation classification, a\ntask where the use of machine learning may lead to negative outcomes on\npeoples' lives. We analyze the potential allocation harms that can result from\nsemantic representation bias. To do so, we study the impact on occupation\nclassification of including explicit gender indicators---such as first names\nand pronouns---in different semantic representations of online biographies.\nAdditionally, we quantify the bias that remains when these indicators are\n\"scrubbed,\" and describe proxy behavior that occurs in the absence of explicit\ngender indicators. As we demonstrate, differences in true positive rates\nbetween genders are correlated with existing gender imbalances in occupations,\nwhich may compound these imbalances.\n", "title": "Bias in Bios: A Case Study of Semantic Representation Bias in a High-Stakes Setting" }
null
null
null
null
true
null
13871
null
Default
null
null
null
{ "abstract": " This paper addresses deep face recognition (FR) problem under open-set\nprotocol, where ideal face features are expected to have smaller maximal\nintra-class distance than minimal inter-class distance under a suitably chosen\nmetric space. However, few existing algorithms can effectively achieve this\ncriterion. To this end, we propose the angular softmax (A-Softmax) loss that\nenables convolutional neural networks (CNNs) to learn angularly discriminative\nfeatures. Geometrically, A-Softmax loss can be viewed as imposing\ndiscriminative constraints on a hypersphere manifold, which intrinsically\nmatches the prior that faces also lie on a manifold. Moreover, the size of\nangular margin can be quantitatively adjusted by a parameter $m$. We further\nderive specific $m$ to approximate the ideal feature criterion. Extensive\nanalysis and experiments on Labeled Face in the Wild (LFW), Youtube Faces (YTF)\nand MegaFace Challenge show the superiority of A-Softmax loss in FR tasks. The\ncode has also been made publicly available.\n", "title": "SphereFace: Deep Hypersphere Embedding for Face Recognition" }
null
null
null
null
true
null
13872
null
Default
null
null
null
{ "abstract": " We consider the massless nonlinear Dirac (NLD) equation in $1+1$ dimension\nwith scalar-scalar self-interaction $\\frac{g^2}{2} (\\bar{\\Psi} \\Psi)^2$ in the\npresence of three external electromagnetic potentials $V(x)$, a potential\nbarrier, a constant potential, and a potential well. By solving numerically the\nNLD equation, we find that, for all three cases, after a short transit time,\nthe initial pulse breaks into two pulses which are solutions of the massless\nlinear Dirac equation traveling in opposite directions with the speed of light.\nDuring this splitting the charge and the energy are conserved, whereas the\nmomentum is conserved when the solutions possess specific symmetries. For the\ncase of the constant potential, we derive exact analytical solutions of the\nmassless NLD equation that are also solutions of the massless linearized Dirac\nequation.\n", "title": "Speed-of-light pulses in the massless nonlinear Dirac equation with a potential" }
null
null
null
null
true
null
13873
null
Default
null
null
null
{ "abstract": " The two-dimensional non-oriented bin packing problem with due dates packs a\nset of rectangular items, which may be rotated by 90 degrees, into identical\nrectangular bins. The bins have equal processing times. An item's lateness is\nthe difference between its due date and the completion time of its bin. The\nproblem packs all items without overlap as to minimize maximum lateness Lmax.\nThe paper proposes a tight lower bound that enhances an existing bound on\nLmax for 24.07% of the benchmark instances and matches it in 30.87% cases. In\naddition, it models the problem using mixed integer programming (MIP), and\nsolves small-sized instances exactly using CPLEX. It approximately solves\nlarger-sized instances using a two-stage heuristic. The first stage constructs\nan initial solution via a first-fit heuristic that applies an iterative\nconstraint programming (CP)-based neighborhood search. The second stage, which\nis iterative too, approximately solves a series of assignment low-level MIPs\nthat are guided by feasibility constraints. It then enhances the solution via a\nhigh-level random local search. The approximate approach improves existing\nupper bounds by 27.45% on average, and obtains the optimum for 33.93% of the\ninstances. Overall, the exact and approximate approaches identify the optimum\nfor 39.07% cases.\nThe proposed approach is applicable to complex problems. It applies CP and\nMIP sequentially, while exploring their advantages, and hybridizes heuristic\nsearch with MIP. It embeds a new lookahead strategy that guards against\ninfeasible search directions and constrains the search to improving directions\nonly; thus, differs from traditional lookahead beam searches.\n", "title": "A Hybrid Feasibility Constraints-Guided Search to the Two-Dimensional Bin Packing Problem with Due Dates" }
null
null
[ "Computer Science" ]
null
true
null
13874
null
Validated
null
null
null
{ "abstract": " To constrain models of high-mass star formation, the Herschel/HOBYS KP aims\nat discovering massive dense cores (MDCs) able to host the high-mass analogs of\nlow-mass prestellar cores, which have been searched for over the past decade.\nWe here focus on NGC6334, one of the best-studied HOBYS molecular cloud\ncomplexes.\nWe used Herschel PACS and SPIRE 70-500mu images of the NGC6334 complex\ncomplemented with (sub)millimeter and mid-infrared data. We built a complete\nprocedure to extract ~0.1 pc dense cores with the getsources software, which\nsimultaneously measures their far-infrared to millimeter fluxes. We carefully\nestimated the temperatures and masses of these dense cores from their SEDs.\nA cross-correlation with high-mass star formation signposts suggests a mass\nthreshold of 75Msun for MDCs in NGC6334. MDCs have temperatures of 9.5-40K,\nmasses of 75-1000Msun, and densities of 10^5-10^8cm-3. Their mid-IR emission is\nused to separate 6 IR-bright and 10 IR-quiet protostellar MDCs while their 70mu\nemission strength, with respect to fitted SEDs, helps identify 16 starless MDC\ncandidates. The ability of the latter to host high-mass prestellar cores is\ninvestigated here and remains questionable. An increase in mass and density\nfrom the starless to the IR-quiet and IR-bright phases suggests that the\nprotostars and MDCs simultaneously grow in mass. The statistical lifetimes of\nthe high-mass prestellar and protostellar core phases, estimated to be\n1-7x10^4yr and at most 3x10^5yr respectively, suggest a dynamical scenario of\nhigh-mass star formation.\nThe present study provides good mass estimates for a statistically\nsignificant sample, covering the earliest phases of high-mass star formation.\nHigh-mass prestellar cores may not exist in NGC6334, favoring a scenario\npresented here, which simultaneously forms clouds and high-mass protostars.\n", "title": "The earliest phases of high-mass star formation, as seen in NGC 6334 by \\emph{Herschel}" }
null
null
null
null
true
null
13875
null
Default
null
null
null
{ "abstract": " Efficiency of the error control of numerical solutions of partial\ndifferential equations entirely depends on the two factors: accuracy of an a\nposteriori error majorant and the computational cost of its evaluation for some\ntest function/vector-function plus the cost of the latter. In the paper,\nconsistency of an a posteriori bound implies that it is the same in the order\nwith the respective unimprovable a priori bound. Therefore, it is the basic\ncharacteristic related to the first factor. The paper is dedicated to the\nelliptic diffusion-reaction equations. We present a guaranteed robust a\nposteriori error majorant effective at any nonnegative constant reaction\ncoefficient (r.c.). For a wide range of finite element solutions on a\nquasiuniform meshes the majorant is consistent. For big values of r.c. the\nmajorant coincides with the majorant of Aubin (1972), which, as it is known,\nfor not big r.c. ($<ch^{-2}$) is inconsistent and loses its sense at r.c.\napproaching zero. Our majorant improves also some other majorants derived for\nthe Poisson and reaction-diffusion equations.\n", "title": "Robust consistent a posteriori error majorants for approximate solutions of diffusion-reaction equations" }
null
null
null
null
true
null
13876
null
Default
null
null
null
{ "abstract": " This paper finds near equilibrium prices for electricity markets with\nnonconvexities due to binary variables, in order to reduce the market\nparticipants' opportunity costs, such as generators' unrecovered costs. The\nopportunity cost is defined as the difference between the profit when the\ninstructions of the market operator are followed and when the market\nparticipants can freely make their own decisions based on the market prices. We\nuse the minimum complementarity approximation to the minimum total opportunity\ncost (MTOC) model, from previous research, with tests on a much more realistic\nunit commitment (UC) model than in previous research, including features such\nas reserve requirements, ramping constraints, and minimum up and down times.\nThe developed model incorporates flexible price responsive demand, as in\nprevious research, but since not all demand is price responsive, we consider\nthe more realistic case that total demand is a mixture of fixed and flexible.\nAnother improvement over previous MTOC research is computational: whereas the\nprevious research had nonconvex terms among the objective function's continuous\nvariables, we convert the objective to an equivalent form that contains only\nlinear and convex quadratic terms in the continuous variables. We compare the\nunit commitment model with the standard social welfare optimization version of\nUC, in a series of sensitivity analyses, varying flexible demand to represent\nvarying degrees of future penetration of electric vehicles and smart\nappliances, different ratios of generation availability, and different values\nof transmission line capacities to consider possible congestion. The minimum\ntotal opportunity cost and social welfare solutions are mostly very close in\ndifferent scenarios, except in some extreme cases.\n", "title": "Extended opportunity cost model to find near equilibrium electricity prices under non-convexities" }
null
null
null
null
true
null
13877
null
Default
null
null
null
{ "abstract": " The two-sample hypothesis testing problem is studied for the challenging\nscenario of high dimensional data sets with small sample sizes. We show that\nthe two-sample hypothesis testing problem can be posed as a one-class set\nclassification problem. In the set classification problem the goal is to\nclassify a set of data points that are assumed to have a common class. We prove\nthat the average probability of error given a set is less than or equal to the\nBayes error and decreases as a power of $n$ number of sample data points in the\nset. We use the positive definite Set Kernel for directly mapping sets of data\nto an associated Reproducing Kernel Hilbert Space, without the need to learn a\nprobability distribution. We specifically solve the two-sample hypothesis\ntesting problem using a one-class SVM in conjunction with the proposed Set\nKernel. We compare the proposed method with the Maximum Mean Discrepancy,\nF-Test and T-Test methods on a number of challenging simulated high dimensional\nand small sample size data. We also perform two-sample hypothesis testing\nexperiments on six cancer gene expression data sets and achieve zero type-I and\ntype-II error results on all data sets.\n", "title": "Kernel Two-Sample Hypothesis Testing Using Kernel Set Classification" }
null
null
null
null
true
null
13878
null
Default
null
null
null
{ "abstract": " The least square Monte Carlo (LSM) algorithm proposed by Longstaff and\nSchwartz [2001] is the most widely used method for pricing options with early\nexercise features. The LSM estimator contains look-ahead bias, and the\nconventional technique of removing it necessitates an independent set of\nsimulations. This study proposes a new approach for efficiently eliminating\nlook-ahead bias by using the leave-one-out method, a well-known\ncross-validation technique for machine learning applications. The leave-one-out\nLSM (LOOLSM) method is illustrated with examples, including multi-asset options\nwhose LSM price is biased high. The asymptotic behavior of look-ahead bias is\nalso discussed with the LOOLSM approach.\n", "title": "An Efficient Approach for Removing Look-ahead Bias in the Least Square Monte Carlo Algorithm: Leave-One-Out" }
null
null
null
null
true
null
13879
null
Default
null
null
null
{ "abstract": " Humans can ground natural language commands to tasks at both abstract and\nfine-grained levels of specificity. For instance, a human forklift operator can\nbe instructed to perform a high-level action, like \"grab a pallet\" or a\nlow-level action like \"tilt back a little bit.\" While robots are also capable\nof grounding language commands to tasks, previous methods implicitly assume\nthat all commands and tasks reside at a single, fixed level of abstraction.\nAdditionally, methods that do not use multiple levels of abstraction encounter\ninefficient planning and execution times as they solve tasks at a single level\nof abstraction with large, intractable state-action spaces closely resembling\nreal world complexity. In this work, by grounding commands to all the tasks or\nsubtasks available in a hierarchical planning framework, we arrive at a model\ncapable of interpreting language at multiple levels of specificity ranging from\ncoarse to more granular. We show that the accuracy of the grounding procedure\nis improved when simultaneously inferring the degree of abstraction in language\nused to communicate the task. Leveraging hierarchy also improves efficiency:\nour proposed approach enables a robot to respond to a command within one second\non 90% of our tasks, while baselines take over twenty seconds on half the\ntasks. Finally, we demonstrate that a real, physical robot can ground commands\nat multiple levels of abstraction allowing it to efficiently plan different\nsubtasks within the same planning hierarchy.\n", "title": "Accurately and Efficiently Interpreting Human-Robot Instructions of Varying Granularities" }
null
null
null
null
true
null
13880
null
Default
null
null
null
{ "abstract": " We study a nonlocal Venttsel' problem in a non-convex bounded domain with a\nKoch-type boundary. Regularity results of the strict solution are proved in\nweighted Sobolev spaces. The numerical approximation of the problem is carried\nout and optimal a priori error estimates are obtained.\n", "title": "Nonlocal Venttsel' diffusion in fractal-type domains: regularity results and numerical approximation" }
null
null
[ "Mathematics" ]
null
true
null
13881
null
Validated
null
null
null
{ "abstract": " We generalize the concept of the spin-momentum locking to magnonic systems\nand derive the formula to calculate the spin expectation value for one-magnon\nstates of general two-body spin Hamiltonians. We give no-go conditions for\nmagnon spin to be independent of momentum. As examples of the magnon\nspin-momentum locking, we analyze a one-dimensional antiferromagnet with the\nNéel order and two-dimensional kagome lattice antiferromagnets with the\n120$^\\circ$ structure. We find that the magnon spin depends on its momentum\neven when the Hamiltonian has the $z$-axis spin rotational symmetry, which can\nbe explained in the context of a singular band point or a $U(1)$ symmetry\nbreaking. A spin vortex in momentum space generated in a kagome lattice\nantiferromagnet has the winding number $Q=-2$, while the typical one observed\nin topological insulator surface states is characterized by $Q=+1$. A magnonic\nanalogue of the surface states, the Dirac magnon with $Q=+1$, is found in\nanother kagome lattice antiferromagnet. We also derive the sum rule for $Q$ by\nusing the Poincaré-Hopf index theorem.\n", "title": "Magnon Spin-Momentum Locking: Various Spin Vortices and Dirac Magnons in Noncollinear Antiferromagnets" }
null
null
null
null
true
null
13882
null
Default
null
null
null
{ "abstract": " The method of evaluation outlined in a previous work has been utilized here\nto evaluate certain other three- electron and four- electron atomic integrals\ninvolving s Slater-type orbitals and exponential correlation with unlinked\n$r_{ij}$'s. Limiting expressions for various such integrals have been derived,\nwhich has not been done earlier. Closed-form expressions for $<r_{12} r_{13} /\nr_{14}>$, $<r_{12}r_{34}/r_{23}>$, $<r_{12}r_{23}/r_{34}>$,\n$<r_{12}r_{13}/r_{34}>$ and $<r_{12}r_{34}/r_{13}>$ have been obtained.\n", "title": "Analytic evaluation of some three- and four- electron atomic integrals involving s STO's and exponential correlation with unlinked $r_{ij}$'s" }
null
null
null
null
true
null
13883
null
Default
null
null
null
{ "abstract": " For distributed computing environment, we consider the empirical risk\nminimization problem and propose a distributed and communication-efficient\nNewton-type optimization method. At every iteration, each worker locally finds\nan Approximate NewTon (ANT) direction, which is sent to the main driver. The\nmain driver, then, averages all the ANT directions received from workers to\nform a {\\it Globally Improved ANT} (GIANT) direction. GIANT is highly\ncommunication efficient and naturally exploits the trade-offs between local\ncomputations and global communications in that more local computations result\nin fewer overall rounds of communications. Theoretically, we show that GIANT\nenjoys an improved convergence rate as compared with first-order methods and\nexisting distributed Newton-type methods. Further, and in sharp contrast with\nmany existing distributed Newton-type methods, as well as popular first-order\nmethods, a highly advantageous practical feature of GIANT is that it only\ninvolves one tuning parameter. We conduct large-scale experiments on a computer\ncluster and, empirically, demonstrate the superior performance of GIANT.\n", "title": "GIANT: Globally Improved Approximate Newton Method for Distributed Optimization" }
null
null
null
null
true
null
13884
null
Default
null
null
null
{ "abstract": " It is known that input-output approaches based on scaled small-gain theorems\nwith constant $D$-scalings and integral linear constraints are non-conservative\nfor the analysis of some classes of linear positive systems interconnected with\nuncertain linear operators. This dramatically contrasts with the case of\ngeneral linear systems with delays where input-output approaches provide, in\ngeneral, sufficient conditions only. Using these results we provide simple\nalternative proofs for many of the existing results on the stability of linear\npositive systems with discrete/distributed/neutral time-invariant/-varying\ndelays and linear difference equations. In particular, we give a simple proof\nfor the characterization of diagonal Riccati stability for systems with\ndiscrete-delays and generalize this equation to other types of delay systems.\nThe fact that all those results can be reproved in a very simple way\ndemonstrates the importance and the efficiency of the input-output framework\nfor the analysis of linear positive systems. The approach is also used to\nderive performance results evaluated in terms of the $L_1$-, $L_2$- and\n$L_\\infty$-gains. It is also flexible enough to be used for design purposes.\n", "title": "Stability and performance analysis of linear positive systems with delays using input-output methods" }
null
null
null
null
true
null
13885
null
Default
null
null
null
{ "abstract": " Online social media have become an integral part of our social beings.\nAnalyzing conversations in social media platforms can lead to complex\nprobabilistic models to understand social interaction networks. In this paper,\nwe present a modeling approach for characterizing social interaction networks\nby jointly inferring user communities and interests based on social media\ninteractions. We present several pattern inference models: i) Interest pattern\nmodel (IPM) captures population level interaction topics, ii) User interest\npattern model (UIPM) captures user specific interaction topics, and iii)\nCommunity interest pattern model (CIPM) captures both community structures and\nuser interests. We test our methods on Twitter data collected from Purdue\nUniversity community. From our model results, we observe the interaction topics\nand communities related to two big events within Purdue University community,\nnamely Purdue Day of Giving and Senator Bernie Sanders' visit to Purdue\nUniversity as part of Indiana Primary Election 2016. Constructing social\ninteraction networks based on user interactions accounts for the similarity of\nusers' interactions on various topics of interest and indicates their community\nbelonging further beyond connectivity. We observed that the\ndegree-distributions of such networks follow power-law that is indicative of\nthe existence of fewer nodes in the network with higher levels of interactions,\nand many other nodes with less interactions. We also discuss the application of\nsuch networks as a useful tool to effectively disseminate specific information\nto the target audience towards planning any large-scale events and demonstrate\nhow to single out specific nodes in a given community by running network\nalgorithms.\n", "title": "Joint Inference of User Community and Interest Patterns in Social Interaction Networks" }
null
null
null
null
true
null
13886
null
Default
null
null
null
{ "abstract": " A theory is proposed, in which the basic elements of reality are assumed to\nbe something called modes. Particles are interpreted as composites of modes,\ncorresponding to eigenstates of the interaction Hamiltonian of modes. At the\nfundamental level of the proposed theory, there are two basic modes only,whose\nspinor spaces are the two smallest nontrivial representation spaces of the\nSL(2,C) group, one being the complex conjugate of the other. All other modes\nare constructed from the two basic modes, making use of the operations of\ndirect sum and direct product for related spinor spaces. Accompanying the\nconstruction of direct-product modes, interactions among modes are introduced\nin a natural way, with the interaction Hamiltonian given from mappings between\nthe corresponding state spaces. The interaction Hamiltonian thus obtained turn\nout to possess a form, which is similar to a major part of the interaction\nHamiltonian in the Glashow-Weinberg-Salam electroweak theory. In the proposed\ntheory, it is possible for the second-order perturbation expansion of energy to\nbe free from ultraviolet divergence. This expansion is used to derive some\napproximate relations for neutrino masses; in particular, a rough estimate is\nobtained for the ratio of mass differences of neutrinos, which gives the\ncorrect order of magnitude compared with the experimental result.\n", "title": "A mode theory for the electoweak interaction and its application to neutrino masses" }
null
null
null
null
true
null
13887
null
Default
null
null
null
{ "abstract": " The segmentation of large scale power grids into zones is crucial for control\nroom operators when managing the grid complexity near real time. In this paper\nwe propose a new method in two steps which is able to automatically do this\nsegmentation, while taking into account the real time context, in order to help\nthem handle shifting dynamics. Our method relies on a \"guided\" machine learning\napproach. As a first step, we define and compute a task specific \"Influence\nGraph\" in a guided manner. We indeed simulate on a grid state chosen\ninterventions, representative of our task of interest (managing active power\nflows in our case). For visualization and interpretation, we then build a\nhigher representation of the grid relevant to this task by applying the graph\ncommunity detection algorithm \\textit{Infomap} on this Influence Graph. To\nillustrate our method and demonstrate its practical interest, we apply it on\ncommonly used systems, the IEEE-14 and IEEE-118. We show promising and original\ninterpretable results, especially on the previously well studied RTS-96 system\nfor grid segmentation. We eventually share initial investigation and results on\na large-scale system, the French power grid, whose segmentation had a\nsurprising resemblance with RTE's historical partitioning.\n", "title": "Guided Machine Learning for power grid segmentation" }
null
null
null
null
true
null
13888
null
Default
null
null
null
{ "abstract": " $^{121,123}Sb$ nuclear quadrupole resonance (NQR) was applied to\n$Fe(Sb_{1-x}Te_x)_2$ in the low doping regime (\\emph{x = 0, 0.01} and\n\\emph{0.05}) as a microscopic zero field probe to study the evolution of\n\\emph{3d} magnetism and the emergence of metallic behavior. Whereas the NQR\nspectra itself reflects the degree of local disorder via the width of the\nindividual NQR lines, the spin lattice relaxation rate (SLRR) $1/T_1(T)$ probes\nthe fluctuations at the $Sb$ - site. The fluctuations originate either from\nconduction electrons or from magnetic moments. In contrast to the semi metal\n$FeSb_2$ with a clear signature of the charge and spin gap formation in\n$1/T_1(T)T ( \\sim exp/ (\\Delta k_BT) ) $, the 1\\% $Te$ doped system exhibits\nalmost metallic conductivity and a almost filled gap. A weak divergence of the\nSLRR coefficient $1/T_1(T)T \\sim T^{-n} \\sim T^{-0.2}$ points towards the\npresence of electronic correlations towards low temperatures wheras the\n\\textit{5\\%} $Te$ doped sample exhibits a much larger divergence in the SLRR\ncoefficient showing $1/T_1(T)T \\sim T^{-0.72} $. According to the specific heat\ndivergence a power law with $n\\ =\\ 2\\ m\\ =\\ 0.56$ is expected for the SLRR.\nFurthermore $Te$-doped $FeSb_2$ as a disordered paramagnetic metal might be a\nplatform for the electronic Griffith phase scenario. NQR evidences a\nsubstantial asymmetric broadening of the $^{121,123}Sb$ NQR spectrum for the\n\\emph{5\\%} sample. This has purely electronic origin in agreement with the\nelectronic Griffith phase and stems probably from an enhanced $Sb$-$Te$ bond\npolarization and electronic density shift towards the $Te$ atom inside\n$Sb$-$Te$ dumbbell.\n", "title": "121,123Sb NQR as a microscopic probe in Te doped correlated semimetal FeSb2 : emergence of electronic Griffith phase, magnetism and metallic behavior %" }
null
null
null
null
true
null
13889
null
Default
null
null
null
{ "abstract": " There are many statistical tests that verify the null hypothesis: the\nvariable of interest has the same distribution among k-groups. But once the\nnull hypothesis is rejected, how to present the structure of dissimilarity\nbetween groups? In this article, we introduce The Merging Path Plot - a\nmethodology, and factorMerger - an R package, for exploration and visualization\nof k-group dissimilarities. Comparison of k-groups is one of the most important\nissues in exploratory analyses and it has zillions of applications. The\nclassical solution is to test a~null hypothesis that observations from all\ngroups come from the same distribution. If the global null hypothesis is\nrejected, a~more detailed analysis of differences among pairs of groups is\nperformed. The traditional approach is to use pairwise post hoc tests in order\nto verify which groups differ significantly. However, this approach fails with\na large number of groups in both interpretation and visualization layer.\nThe~Merging Path Plot methodology solves this problem by using an\neasy-to-understand description of dissimilarity among groups based on\nLikelihood Ratio Test (LRT) statistic.\n", "title": "The Merging Path Plot: adaptive fusing of k-groups with likelihood-based model selection" }
null
null
null
null
true
null
13890
null
Default
null
null
null
{ "abstract": " In this paper, we present a new method of measuring Hubble parameter($H(z)$),\nmaking use of the anisotropy of luminosity distance($d_{L}$), and the analysis\nof gravitational wave(GW) of neutron star(NS) binary system. The method has\nnever been put into practice before due to the lack of the ability of detecting\nGW. LIGO's success in detecting GW of black hole(BH) binary system merger\nannounced the possibility of this new method. We apply this method to several\nGW detecting projects, including Advanced LIGO(Adv-LIGO), Einstein\nTelescope(ET) and DECIGO, finding that the $H(z)$ by Adv-LIGO and ET is of bad\naccuracy, while the $H(z)$ by DECIGO shows a good accuracy. We use the error\ninformation of $H(z)$ by DECIGO to simulate $H(z)$ data at every 0.1 redshift\nspan, and put the mock data into the forecasting of cosmological parameters.\nCompared with the available 38 observed $H(z)$ data(OHD), mock data shows an\nobviously tighter constraint on cosmological parameters, and a concomitantly\nhigher value of Figure of Merit(FoM). For a 3-year-observation by standard\nsirens of DECIGO, the FoM value is as high as 834.9. If a 10-year-observation\nis launched, the FoM could reach 2783.1. For comparison, the FoM of 38 actual\nobserved $H(z)$ data is 9.3. These improvement indicates that the new method\nhas great potential in further cosmological constraints.\n", "title": "Constraint on cosmological parameters by Hubble parameter from gravitational wave standard sirens of neutron star binary system" }
null
null
null
null
true
null
13891
null
Default
null
null
null
{ "abstract": " Testing whether a probability distribution is compatible with a given\nBayesian network is a fundamental task in the field of causal inference, where\nBayesian networks model causal relations. Here we consider the class of causal\nstructures where all correlations between observed quantities are solely due to\nthe influence from latent variables. We show that each model of this type\nimposes a certain signature on the observable covariance matrix in terms of a\nparticular decomposition into positive semidefinite components. This signature,\nand thus the underlying hypothetical latent structure, can be tested in a\ncomputationally efficient manner via semidefinite programming. This stands in\nstark contrast with the algebraic geometric tools required if the full\nobservable probability distribution is taken into account. The semidefinite\ntest is compared with tests based on entropic inequalities.\n", "title": "Semidefinite tests for latent causal structures" }
null
null
[ "Mathematics", "Statistics" ]
null
true
null
13892
null
Validated
null
null
null
{ "abstract": " The Peierls-Nabarro (PN) model for dislocations is a hybrid model that\nincorporates the atomistic information of the dislocation core structure into\nthe continuum theory. In this paper, we study the convergence from a full\natomistic model to the PN model with $\\gamma$-surface for the dislocation in a\nbilayer system (e.g. bilayer graphene). We prove that the displacement field of\nand the total energy of the dislocation solution of the PN model are\nasymptotically close to those of the full atomistic model. Our work can be\nconsidered as a generalization of the analysis of the convergence from\natomistic model to Cauchy-Born rule for crystals without defects in the\nliterature.\n", "title": "From atomistic model to the Peierls-Nabarro model with $γ$-surface for dislocations" }
null
null
null
null
true
null
13893
null
Default
null
null
null
{ "abstract": " We consider partial torsion fields (fields generated by a root of a division\npolynomial) for elliptic curves. By analysing the reduction properties of\nelliptic curves, and applying the Montes Algorithm, we obtain information about\nthe ring of integers. In particular, for the partial $3$-torsion fields for a\ncertain one-parameter family of non-CM elliptic curves, we describe a power\nbasis. As a result, we show that the one-parameter family of quartic $S_4$\nfields given by $T^4 - 6T^2 - \\alpha T - 3$ for $\\alpha \\in \\mathbb{Z}$ such\nthat $\\alpha \\pm 8$ are squarefree, are monogenic.\n", "title": "A family of monogenic $S_4$ quartic fields arising from elliptic curves" }
null
null
null
null
true
null
13894
null
Default
null
null
null
{ "abstract": " Data aggregation is a promising approach to enable massive machine-type\ncommunication (mMTC). This paper focuses on the aggregation phase where a\nmassive number of machine-type devices (MTDs) transmit to aggregators. By using\nnon-orthogonal multiple access (NOMA) principles, we allow several MTDs to\nshare the same orthogonal channel in our proposed hybrid access scheme. We\ndevelop an analytical framework based on stochastic geometry to investigate the\nsystem performance in terms of average success probability and average number\nof simultaneously served MTDs, under imperfect successive interference\ncancellation (SIC) at the aggregators, for two scheduling schemes: random\nresource scheduling (RRS) and channel-aware resource scheduling (CRS). We\nidentify the power constraints on the MTDs sharing the same channel to attain a\nfair coexistence with purely orthogonal multiple access (OMA) setups, then\npower control coefficients are found so that these MTDs perform with similar\nreliability. We show that under high access demand, the hybrid scheme with CRS\noutperforms the OMA setup by simultaneously serving more MTDs with reduced\npower consumption.\n", "title": "Aggregation and Resource Scheduling in Machine-type Communication Networks: A Stochastic Geometry Approach" }
null
null
[ "Computer Science", "Statistics" ]
null
true
null
13895
null
Validated
null
null
null
{ "abstract": " Digital games are one of the major and most important fields on the\nentertainment domain, which also involves cinema and music. Numerous attempts\nhave been done to improve the quality of the games including more realistic\nartistic production and computer science. Assessing the player's behavior, a\ntask known as player modeling, is currently the need of the hour which leads to\npossible improvements in terms of: (i) better game interaction experience, (ii)\nbetter exploitation of the relationship between players, and (iii)\nincreasing/maintaining the number of players interested in the game. In this\npaper we model players using the basic four behaviors proposed in\n\\cite{BartleArtigo}, namely: achiever, explorer, socializer and killer. Our\nanalysis is carried out using data obtained from the game \"World of Warcraft\"\nover 3 years (2006 $-$ 2009). We employ a semi-supervised learning technique in\norder to find out characteristics that possibly impact player's behavior.\n", "title": "Setting Players' Behaviors in World of Warcraft through Semi-Supervised Learning" }
null
null
[ "Computer Science" ]
null
true
null
13896
null
Validated
null
null
null
{ "abstract": " In this letter, we report our systematic construction of the lattice\nHamiltonian model of topological orders on open surfaces, with explicit\nboundary terms. We do this mainly for the Levin-Wen stringnet model. The full\nHamiltonian in our approach yields a topologically protected, gapped energy\nspectrum, with the corresponding wave functions robust under\ntopology-preserving transformations of the lattice of the system. We explicitly\npresent the wavefunctions of the ground states and boundary elementary\nexcitations. We construct the creation and hopping operators of boundary\nquasi-particles. We find that given a bulk topological order, the gapped\nboundary conditions are classified by Frobenius algebras in its input data.\nEmergent topological properties of the ground states and boundary excitations\nare characterized by (bi-) modules over Frobenius algebras.\n", "title": "Boundary Hamiltonian theory for gapped topological orders" }
null
null
[ "Physics", "Mathematics" ]
null
true
null
13897
null
Validated
null
null
null
{ "abstract": " We prove an identity relating the product of two opposite Schubert varieties\nin the (equivariant) quantum K-theory ring of a cominuscule flag variety to the\nminimal degree of a rational curve connecting the Schubert varieties. We deduce\nthat the sum of the structure constants associated to any product of Schubert\nclasses is equal to one. Equivalently, the sheaf Euler characteristic map\nextends to a ring homomorphism defined on the quantum K-theory ring.\n", "title": "Euler characteristics of cominuscule quantum K-theory" }
null
null
null
null
true
null
13898
null
Default
null
null
null
{ "abstract": " Dialogue Act recognition associate dialogue acts (i.e., semantic labels) to\nutterances in a conversation. The problem of associating semantic labels to\nutterances can be treated as a sequence labeling problem. In this work, we\nbuild a hierarchical recurrent neural network using bidirectional LSTM as a\nbase unit and the conditional random field (CRF) as the top layer to classify\neach utterance into its corresponding dialogue act. The hierarchical network\nlearns representations at multiple levels, i.e., word level, utterance level,\nand conversation level. The conversation level representations are input to the\nCRF layer, which takes into account not only all previous utterances but also\ntheir dialogue acts, thus modeling the dependency among both, labels and\nutterances, an important consideration of natural dialogue. We validate our\napproach on two different benchmark data sets, Switchboard and Meeting Recorder\nDialogue Act, and show performance improvement over the state-of-the-art\nmethods by $2.2\\%$ and $4.1\\%$ absolute points, respectively. It is worth\nnoting that the inter-annotator agreement on Switchboard data set is $84\\%$,\nand our method is able to achieve the accuracy of about $79\\%$ despite being\ntrained on the noisy data.\n", "title": "Dialogue Act Sequence Labeling using Hierarchical encoder with CRF" }
null
null
[ "Computer Science" ]
null
true
null
13899
null
Validated
null
null
null
{ "abstract": " Questions of noise stability play an important role in hardness of\napproximation in computer science as well as in the theory of voting. In many\napplications, the goal is to find an optimizer of noise stability among all\npossible partitions of $\\mathbb{R}^n$ for $n \\geq 1$ to $k$ parts with given\nGaussian measures $\\mu_1,\\ldots,\\mu_k$. We call a partition $\\epsilon$-optimal,\nif its noise stability is optimal up to an additive $\\epsilon$. In this paper,\nwe give an explicit, computable function $n(\\epsilon)$ such that an\n$\\epsilon$-optimal partition exists in $\\mathbb{R}^{n(\\epsilon)}$. This result\nhas implications for the computability of certain problems in non-interactive\nsimulation, which are addressed in a subsequent work.\n", "title": "Noise Stability is computable and low dimensional" }
null
null
null
null
true
null
13900
null
Default
null
null