Dataset fields (one value per field for each record below):
  text              null
  inputs            dict with keys "abstract" and "title"
  prediction        null
  prediction_agent  null
  annotation        list of topic labels
  annotation_agent  null
  multi_label       bool (1 class)
  explanation       null
  id                string (length 1 to 5)
  metadata          null
  status            string (2 values: "Default", "Validated")
  event_timestamp   null
  metrics           null

Each record is shown below as its "inputs" value followed by a one-line summary of the remaining fields (id, status, annotation, multi_label); fields typed null above are null in every record and are not repeated per row.
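As a reading aid, the sketch below shows one way to represent a single record of this schema in code. The field names follow the schema above; the dataclass itself, the shortened sample row, and the printed output are illustrative assumptions rather than part of the original export.

```python
# Minimal sketch of the per-record structure described above.
# Field names follow the schema; the sample row and printing are
# illustrative assumptions, not part of the original export.
from dataclasses import dataclass
from typing import Any, Dict, List, Optional


@dataclass
class ClassificationRecord:
    inputs: Dict[str, str]                    # {"abstract": ..., "title": ...}
    id: str                                   # e.g. "9401"
    status: str                               # "Default" or "Validated"
    multi_label: bool                         # true for every row shown
    annotation: Optional[List[str]] = None    # topic labels, e.g. ["Physics"]
    # Fields that are null throughout this export:
    text: Optional[str] = None
    prediction: Optional[Any] = None
    prediction_agent: Optional[str] = None
    annotation_agent: Optional[str] = None
    explanation: Optional[str] = None
    metadata: Optional[Dict[str, Any]] = None
    event_timestamp: Optional[str] = None
    metrics: Optional[Dict[str, Any]] = None


# Building a record from the first row of the preview (abstract shortened here).
row = {
    "inputs": {
        "title": "Enumeration of Graphs and the Characteristic Polynomial "
                 "of the Hyperplane Arrangements $\\mathcal{J}_n$",
        "abstract": "We give a complete formula for the characteristic polynomial ...",
    },
    "id": "9401",
    "status": "Default",
    "multi_label": True,
    "annotation": None,
}
record = ClassificationRecord(**row)
print(record.id, record.status, record.inputs["title"])
```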
{ "abstract": " We give a complete formula for the characteristic polynomial of hyperplane\narrangements $\\mathcal J_n$ consisting of the hyperplanes $x_i+x_j=1$, $x_k=0$,\n$x_l=1$, $ 1\\leq i, j, k, l\\leq n$. The formula is obtained by associating\nhyperplane arrangements with graphs, and then enumerating central graphs via\ngenerating functions for the number of bipartite graphs of given order, size\nand number of connected components.\n", "title": "Enumeration of Graphs and the Characteristic Polynomial of the Hyperplane Arrangements $\\mathcal{J}_n$" }
id: 9401 | status: Default | annotation: null | multi_label: true
{ "abstract": " Large-scale wireless testbeds have been setup in the last years with the goal\nto study wireless multi-hop networks in more realistic environments. Since the\nsetup and operation of such a testbed is expensive in terms of money, time, and\nlabor, the crucial question rises whether this effort is justified with the\nscientific results the testbed generates.\nIn this paper, we give an answer to this question based on our experience\nwith the DES-Testbed, a large-scale wireless sensor network and wireless mesh\nnetwork testbed. The DES-Testbed has been operated for almost 5 years. Our\nanalysis comprises more than 1000 experiments that have been run on the testbed\nin the years 2010 and 2011. We discuss the scientific value in respect to the\neffort of experimentation.\n", "title": "On the Scientific Value of Large-scale Testbeds for Wireless Multi-hop Networks" }
id: 9402 | status: Validated | annotation: [ "Computer Science" ] | multi_label: true
{ "abstract": " Nonrapid eye movement (NREM) sleep desaturation may cause neuronal damage due\nto the withdrawal of cerebrovascular reactivity. The current study (1) assessed\nthe prevalence of NREM sleep desaturation in nonhypoxemic patients with chronic\nobstructive pulmonary disease (COPD) and (2) compared a biological marker of\ncerebral lesion and neuromuscular function in patients with and without NREM\nsleep desaturation.\n", "title": "Brain Damage and Motor Cortex Impairment in Chronic Obstructive Pulmonary Disease: Implication of Nonrapid Eye Movement Sleep Desaturation" }
id: 9403 | status: Validated | annotation: [ "Physics" ] | multi_label: true
{ "abstract": " In this paper, we present FPT-algorithms for special cases of the shortest\nvector problem (SVP) and the integer linear programming problem (ILP), when\nmatrices included to the problems' formulations are near square. The main\nparameter is the maximal absolute value of rank minors of matrices included to\nthe problem formulation. Additionally, we present FPT-algorithms with respect\nto the same main parameter for the problems, when the matrices have no singular\nrank sub-matrices.\n", "title": "FPT-algorithms for The Shortest Lattice Vector and Integer Linear Programming Problems" }
id: 9404 | status: Default | annotation: null | multi_label: true
{ "abstract": " Purpose: To improve kidney segmentation in clinical ultrasound (US) images,\nwe develop a new graph cuts based method to segment kidney US images by\nintegrating original image intensity information and texture feature maps\nextracted using Gabor filters. Methods: To handle large appearance variation\nwithin kidney images and improve computational efficiency, we build a graph of\nimage pixels close to kidney boundary instead of building a graph of the whole\nimage. To make the kidney segmentation robust to weak boundaries, we adopt\nlocalized regional information to measure similarity between image pixels for\ncomputing edge weights to build the graph of image pixels. The localized graph\nis dynamically updated and the GC based segmentation iteratively progresses\nuntil convergence. The proposed method has been evaluated and compared with\nstate of the art image segmentation methods based on clinical kidney US images\nof 85 subjects. We randomly selected US images of 20 subjects as training data\nfor tuning the parameters, and validated the methods based on US images of the\nremaining 65 subjects. The segmentation results have been quantitatively\nanalyzed using 3 metrics, including Dice Index, Jaccard Index, and Mean\nDistance. Results: Experiment results demonstrated that the proposed method\nobtained segmentation results for bilateral kidneys of 65 subjects with average\nDice index of 0.9581, Jaccard index of 0.9204, and Mean Distance of 1.7166,\nbetter than other methods under comparison (p<10-19, paired Wilcoxon rank sum\ntests). Conclusions: The proposed method achieved promising performance for\nsegmenting kidneys in US images, better than segmentation methods that built on\nany single channel of image information. This method will facilitate extraction\nof kidney characteristics that may predict important clinical outcomes such\nprogression chronic kidney disease.\n", "title": "A dynamic graph-cuts method with integrated multiple feature maps for segmenting kidneys in ultrasound images" }
id: 9405 | status: Default | annotation: null | multi_label: true
{ "abstract": " Human mobility is known to be distributed across several orders of magnitude\nof physical distances , which makes it generally difficult to endogenously find\nor define typical and meaningful scales. Relevant analyses, from movements to\ngeographical partitions, seem to be relative to some ad-hoc scale, or no scale\nat all. Relying on geotagged data collected from photo-sharing social media, we\napply community detection to movement networks constrained by increasing\npercentiles of the distance distribution. Using a simple parameter-free\ndiscontinuity detection algorithm, we discover clear phase transitions in the\ncommunity partition space. The detection of these phases constitutes the first\nobjective method of characterising endogenous, natural scales of human\nmovement. Our study covers nine regions, ranging from cities to countries of\nvarious sizes and a transnational area. For all regions, the number of natural\nscales is remarkably low (2 or 3). Further, our results hint at scale-related\nbehaviours rather than scale-related users. The partitions of the natural\nscales allow us to draw discrete multi-scale geographical boundaries,\npotentially capable of providing key insights in fields such as epidemiology or\ncultural contagion where the introduction of spatial boundaries is pivotal.\n", "title": "Natural Scales in Geographical Patterns" }
id: 9406 | status: Default | annotation: null | multi_label: true
{ "abstract": " THz time-domain spectroscopy in transmission mode was applied to study dry\nand wet drawing inks. In specific, cochineal-, indigo- and iron-gall based inks\nhave been investigated; some prepared following ancient recipes and others by\nusing synthetic materials. The THz investigations have been realized on both\npellet samples, made by dried inks blended with polyethylene powder, and\nlayered inks, made by liquid deposition on polyethylene pellicles. We\nimplemented an improved THz spectroscopic technique that enabled the\nmeasurement of the material optical parameters and thicknesses of the layered\nink samples on absolute scale. This experimental investigation shows that the\nTHz techniques have the potentiality to recognize drawing inks by their\nspectroscopic features.\n", "title": "Drawing materials studied by THz spectroscopy" }
id: 9407 | status: Default | annotation: null | multi_label: true
{ "abstract": " With the advancement of treatment modalities in radiation therapy for cancer\npatients, outcomes have improved, but at the cost of increased treatment plan\ncomplexity and planning time. The accurate prediction of dose distributions\nwould alleviate this issue by guiding clinical plan optimization to save time\nand maintain high quality plans. We have modified a convolutional deep network\nmodel, U-net (originally designed for segmentation purposes), for predicting\ndose from patient image contours of the planning target volume (PTV) and organs\nat risk (OAR). We show that, as an example, we are able to accurately predict\nthe dose of intensity-modulated radiation therapy (IMRT) for prostate cancer\npatients, where the average Dice similarity coefficient is 0.91 when comparing\nthe predicted vs. true isodose volumes between 0% and 100% of the prescription\ndose. The average value of the absolute differences in [max, mean] dose is\nfound to be under 5% of the prescription dose, specifically for each structure\nis [1.80%, 1.03%](PTV), [1.94%, 4.22%](Bladder), [1.80%, 0.48%](Body), [3.87%,\n1.79%](L Femoral Head), [5.07%, 2.55%](R Femoral Head), and [1.26%,\n1.62%](Rectum) of the prescription dose. We thus managed to map a desired\nradiation dose distribution from a patient's PTV and OAR contours. As an\nadditional advantage, relatively little data was used in the techniques and\nmodels described in this paper.\n", "title": "A feasibility study for predicting optimal radiation therapy dose distributions of prostate cancer patients from patient anatomy using deep learning" }
id: 9408 | status: Default | annotation: null | multi_label: true
{ "abstract": " Bacterial DNA gyrase introduces negative supercoils into chromosomal DNA and\nrelaxes positive supercoils introduced by replication and transiently by\ntranscription. Removal of these positive supercoils is essential for\nreplication fork progression and for the overall unlinking of the two duplex\nDNA strands, as well as for ongoing transcription. To address how gyrase copes\nwith these topological challenges, we used high-speed single-molecule\nfluorescence imaging in live Escherichia coli cells. We demonstrate that at\nleast 300 gyrase molecules are stably bound to the chromosome at any time, with\n~12 enzymes enriched near each replication fork. Trapping of reaction\nintermediates with ciprofloxacin revealed complexes undergoing catalysis. Dwell\ntimes of ~2 s were observed for the dispersed gyrase molecules, which we\npropose maintain steady-state levels of negative supercoiling of the\nchromosome. In contrast, the dwell time of replisome-proximal molecules was ~8\ns, consistent with these catalyzing processive positive supercoil relaxation in\nfront of the progressing replisome.\n", "title": "Single-molecule imaging of DNA gyrase activity in living Escherichia coli" }
id: 9409 | status: Default | annotation: null | multi_label: true
{ "abstract": " From self-driving vehicles and back-flipping robots to virtual assistants who\nbook our next appointment at the hair salon or at that restaurant for dinner -\nmachine learning systems are becoming increasingly ubiquitous. The main reason\nfor this is that these methods boast remarkable predictive capabilities.\nHowever, most of these models remain black boxes, meaning that it is very\nchallenging for humans to follow and understand their intricate inner workings.\nConsequently, interpretability has suffered under this ever-increasing\ncomplexity of machine learning models. Especially with regards to new\nregulations, such as the General Data Protection Regulation (GDPR), the\nnecessity for plausibility and verifiability of predictions made by these black\nboxes is indispensable. Driven by the needs of industry and practice, the\nresearch community has recognised this interpretability problem and focussed on\ndeveloping a growing number of so-called explanation methods over the past few\nyears. These methods explain individual predictions made by black box machine\nlearning models and help to recover some of the lost interpretability. With the\nproliferation of these explanation methods, it is, however, often unclear,\nwhich explanation method offers a higher explanation quality, or is generally\nbetter-suited for the situation at hand. In this thesis, we thus propose an\naxiomatic framework, which allows comparing the quality of different\nexplanation methods amongst each other. Through experimental validation, we\nfind that the developed framework is useful to assess the explanation quality\nof different explanation methods and reach conclusions that are consistent with\nindependent research.\n", "title": "Shedding Light on Black Box Machine Learning Algorithms: Development of an Axiomatic Framework to Assess the Quality of Methods that Explain Individual Predictions" }
id: 9410 | status: Default | annotation: null | multi_label: true
{ "abstract": " Imidazolium based porous cationic polymers were synthesized using an\ninnovative and facile approach, which takes advantage of the Debus Radziszewski\nreaction to obtain meso- and microporous polymers following click chemistry\nprinciples. In the obtained set of materials, click based porous cationic\npolymers have the same cationic backbone, whereas they bear the commonly used\nanions of imidazolium poly(ionic liquid)s. These materials show hierarchical\nporosity and a good specific surface area. Furthermore, their chemical\nstructure was extensively characterized using ATR FTIR and SS NMR\nspectroscopies, and HR MS. These polymers show good performance towards carbon\ndioxide sorption, especially those possessing the acetate anion. This polymer\nhas an uptake of 2 mmol per g of CO2 at 1 bar and 273 K, a value which is among\nthe highest recorded for imidazolium poly(ionic liquid)s. These polymers were\nalso modified in order to introduce N-heterocyclic carbenes along the backbone.\nCarbon dioxide loading in the carbene-containing polymer is in the same range\nas that of the non-modified versions, but the nature of the interaction is\nsubstantially different. The combined use of in situ FTIR spectroscopy and\nmicrocalorimetry evidenced a chemisorption phenomenon that brings about the\nformation of an imidazolium carboxylate zwitterion.\n", "title": "Click-based porous cationic polymers for enhanced carbon dioxide capture" }
id: 9411 | status: Default | annotation: null | multi_label: true
{ "abstract": " We discuss similarity between oscillons and oscillational mode in perturbed\n$\\phi^4$. For small depths of the perturbing potential it is difficult to\ndistinguish between oscillons and the mode in moderately long time evolution,\nmoreover one can transform one into the other by adiabatically switching on and\noff the potential. Basins of attraction are presented in the parameter space\ndescribing the potential and initial conditions.\n", "title": "Oscillons in the presence of external potential" }
id: 9412 | status: Default | annotation: null | multi_label: true
{ "abstract": " Highly eccentric binary systems appear in many astrophysical contexts,\nranging from tidal capture in dense star clusters, precursors of stellar\ndisruption by massive black holes, to high-eccentricity migration of giant\nplanets. In a highly eccentric binary, the tidal potential of one body can\nexcite oscillatory modes in the other during a pericenter passage, resulting in\nenergy exchange between the modes and the binary orbit. These modes exhibit one\nof three behaviors over multiple passages: low-amplitude oscillations, large\namplitude oscillations corresponding to a resonance between the orbital\nfrequency and the mode frequency, and chaotic growth. We study these phenomena\nwith an iterative map, fully exploring how the mode evolution depends on the\npericenter distance and other parameters. In addition, we show that the\ndissipation of mode energy results in a quasi-steady state, with gradual\norbital decay punctuated by resonances, even in systems where the mode\namplitude would initially grow stochastically. A newly captured star around a\nblack hole can experience significant orbital decay and heating due to the\nchaotic growth of the mode amplitude and dissipation. A giant planet pushed\ninto a high-eccentricity orbit may experience a similar effect and become a hot\nor warm Jupiter.\n", "title": "Dynamical Tides in Highly Eccentric Binaries: Chaos, Dissipation and Quasi-Steady State" }
id: 9413 | status: Default | annotation: null | multi_label: true
{ "abstract": " We study threefolds fibred by K3 surfaces admitting a lattice polarization by\na certain class of rank 19 lattices. We begin by showing that any family of\nsuch K3 surfaces is completely determined by a map from the base of the family\nto the appropriate K3 moduli space, which we call the generalized functional\ninvariant. Then we show that if the threefold total space is a smooth\nCalabi-Yau, there are only finitely many possibilities for the polarizing\nlattice and the form of the generalized functional invariant. Finally, we\nconstruct explicit examples of Calabi-Yau threefolds realizing each case and\ncompute their Hodge numbers.\n", "title": "Calabi-Yau threefolds fibred by high rank lattice polarized K3 surfaces" }
id: 9414 | status: Default | annotation: null | multi_label: true
{ "abstract": " In this paper we will deal with Lipschitz continuous perturbations of\nMorse-Smale semigroups with only equilibrium points as critical elements. We\nstudy the behavior of the structure of equilibrium points and their connections\nwhen subjected to non-differentiable perturbations. To this end we define more\ngeneral notions of \\emph{hyperbolicity} and \\emph{transversality}, which do not\nrequire differentiability.\n", "title": "Lipschitz perturbations of Morse-Smale semigroups" }
id: 9415 | status: Default | annotation: null | multi_label: true
{ "abstract": " Human trafficking is one of the most atrocious crimes and among the\nchallenging problems facing law enforcement which demands attention of global\nmagnitude. In this study, we leverage textual data from the website \"Backpage\"-\nused for classified advertisement- to discern potential patterns of human\ntrafficking activities which manifest online and identify advertisements of\nhigh interest to law enforcement. Due to the lack of ground truth, we rely on a\nhuman analyst from law enforcement, for hand-labeling a small portion of the\ncrawled data. We extend the existing Laplacian SVM and present S3VM-R, by\nadding a regularization term to exploit exogenous information embedded in our\nfeature space in favor of the task at hand. We train the proposed method using\nlabeled and unlabeled data and evaluate it on a fraction of the unlabeled data,\nherein referred to as unseen data, with our expert's further verification.\nResults from comparisons between our method and other semi-supervised and\nsupervised approaches on the labeled data demonstrate that our learner is\neffective in identifying advertisements of high interest to law enforcement\n", "title": "Semi-Supervised Learning for Detecting Human Trafficking" }
id: 9416 | status: Default | annotation: null | multi_label: true
{ "abstract": " In May of 1935, Einstein published with two co-authors the famous EPR-paper\nabout entangled particles, which questioned the completeness of Quantum\nMechanics by means of a gedankenexperiment. Only one month later, he published\na work that seems unconnected to the EPR-paper at first, the so called\nEinstein-Rosen-paper, that presented a solution of the field equations for\nparticles in the framework of general relativity. Both papers ask for the\nconception of completeness in a theory and, from a modern perspective, it is\neasy to believe that there is a connection between these topics. We question\nwhether Einstein might have considered that a correlation between nonlocal\nfeatures of Quantum Mechanics and the Einstein-Rosen bridge can be used to\nexplain entanglement. We analyse this question by discussing the used\nconceptions of \"completeness,\" \"atomistic structure of matter,\" and \"quantum\nphenomena.\" We discuss the historical embedding of the two works and the\ncontext to modern research. Recent approaches are presented that formulate an\nEPR=ER principle and claim an equivalence of the basic principles of these two\npapers.\n", "title": "Einstein's 1935 papers: EPR=ER?" }
id: 9417 | status: Default | annotation: null | multi_label: true
{ "abstract": " The rising interest in the construction and the quality of (business) process\nmodels resulted in an abundancy of emerged research studies and different\nfindings about process model quality. The lack of overview and the lack of\nconsensus hinder the development of the research field. The research objective\nis to collect, analyse, structure, and integrate the existing knowledge in a\ncomprehensive framework that strives to find a balance between completeness and\nrelevance without hindering the overview. The Systematic Literature Review\nmethodology was applied to collect the relevant studies. Because several\nstudies exist that each partially addresses this research objective, the review\nwas performed at a tertiary level. Based on a critical analysis of the\ncollected papers, a comprehensive, but structured overview of the state of the\nart in the field was composed. The existing academic knowledge about process\nmodel quality was carefully integrated and structured into the Comprehensive\nProcess Model Quality Framework (CPMQF). The framework summarizes 39 quality\ndimensions, 21 quality metrics, 28 quality (sub)drivers, 44 (sub)driver\nmetrics, 64 realization initiatives and 15 concrete process model purposes\nrelated to 4 types of organizational benefits, as well as the relations between\nall of these. This overview is thus considered to form a valuable instrument\nfor both researchers and practitioners that are concerned about process model\nquality. The framework is the first to address the concept of process model\nquality in such a comprehensive way.\n", "title": "An overview of process model quality literature - The Comprehensive Process Model Quality Framework" }
id: 9418 | status: Default | annotation: null | multi_label: true
{ "abstract": " We study the performance of the Least Squares Estimator (LSE) in a general\nnonparametric regression model, when the errors are independent of the\ncovariates but may only have a $p$-th moment ($p\\geq 1$). In such a\nheavy-tailed regression setting, we show that if the model satisfies a standard\n`entropy condition' with exponent $\\alpha \\in (0,2)$, then the $L_2$ loss of\nthe LSE converges at a rate \\begin{align*}\n\\mathcal{O}_{\\mathbf{P}}\\big(n^{-\\frac{1}{2+\\alpha}} \\vee\nn^{-\\frac{1}{2}+\\frac{1}{2p}}\\big). \\end{align*} Such a rate cannot be improved\nunder the entropy condition alone.\nThis rate quantifies both some positive and negative aspects of the LSE in a\nheavy-tailed regression setting. On the positive side, as long as the errors\nhave $p\\geq 1+2/\\alpha$ moments, the $L_2$ loss of the LSE converges at the\nsame rate as if the errors are Gaussian. On the negative side, if\n$p<1+2/\\alpha$, there are (many) hard models at any entropy level $\\alpha$ for\nwhich the $L_2$ loss of the LSE converges at a strictly slower rate than other\nrobust estimators.\nThe validity of the above rate relies crucially on the independence of the\ncovariates and the errors. In fact, the $L_2$ loss of the LSE can converge\narbitrarily slowly when the independence fails.\nThe key technical ingredient is a new multiplier inequality that gives sharp\nbounds for the `multiplier empirical process' associated with the LSE. We\nfurther give an application to the sparse linear regression model with\nheavy-tailed covariates and errors to demonstrate the scope of this new\ninequality.\n", "title": "Convergence rates of least squares regression estimators with heavy-tailed errors" }
id: 9419 | status: Validated | annotation: [ "Mathematics", "Statistics" ] | multi_label: true
{ "abstract": " Kriging is a widely employed technique, in particular for computer\nexperiments, in machine learning or in geostatistics. An important challenge\nfor Kriging is the computational burden when the data set is large. We focus on\na class of methods aiming at decreasing this computational cost, consisting in\naggregating Kriging predictors based on smaller data subsets. We prove that\naggregations based solely on the conditional variances provided by the\ndifferent Kriging predictors can yield an inconsistent final Kriging\nprediction. In contrasts, we study theoretically the recent proposal by\n[Rulli{è}re et al., 2017] and obtain additional attractive properties for it.\nWe prove that this predictor is consistent, we show that it can be interpreted\nas an exact conditional distribution for a modified process and we provide\nerror bounds for it.\n", "title": "Some properties of nested Kriging predictors" }
id: 9420 | status: Default | annotation: null | multi_label: true
{ "abstract": " Inspired by the matching of supply to demand in logistical problems, the\noptimal transport (or Monge--Kantorovich) problem involves the matching of\nprobability distributions defined over a geometric domain such as a surface or\nmanifold. In its most obvious discretization, optimal transport becomes a\nlarge-scale linear program, which typically is infeasible to solve efficiently\non triangle meshes, graphs, point clouds, and other domains encountered in\ngraphics and machine learning. Recent breakthroughs in numerical optimal\ntransport, however, enable scalability to orders-of-magnitude larger problems,\nsolvable in a fraction of a second. Here, we discuss advances in numerical\noptimal transport that leverage understanding of both discrete and smooth\naspects of the problem. State-of-the-art techniques in discrete optimal\ntransport combine insight from partial differential equations (PDE) with convex\nanalysis to reformulate, discretize, and optimize transportation problems. The\nend result is a set of theoretically-justified models suitable for domains with\nthousands or millions of vertices. Since numerical optimal transport is a\nrelatively new discipline, special emphasis is placed on identifying and\nexplaining open problems in need of mathematical insight and additional\nresearch.\n", "title": "Optimal Transport on Discrete Domains" }
id: 9421 | status: Validated | annotation: [ "Computer Science" ] | multi_label: true
{ "abstract": " With the prospect of the next generation of ground-based telescopes, the\nextremely large telescopes (ELTs), increasingly complex and demanding adaptive\noptics (AO) systems are needed. This is to compensate for image distortion\ncaused by atmospheric turbulence and fully take advantage of mirrors with\ndiameters of 30 to 40 m. This requires a more precise characterization of the\nturbulence. The PML (Profiler of Moon Limb) was developed within this context.\nThe PML aims to provide high-resolution altitude profiles of the turbulence\nusing differential measurements of the Moon limb position to calculate the\ntransverse spatio-angular covariance of the Angle of Arrival fluctuations. The\ncovariance of differential image motion for different separation angles is\nsensitive to the altitude distribution of the seeing. The use of the continuous\nMoon limb provides a large number of separation angles allowing for the\nhigh-resolution altitude of the profiles. The method is presented and tested\nwith simulated data. Moreover a PML instrument was deployed at the Sutherland\nObservatory in South Africa in August 2011. We present here the results of this\nmeasurement campaign.\n", "title": "High-Resolution Altitude Profiles of the Atmospheric Turbulence with PML at the Sutherland Observatory" }
id: 9422 | status: Default | annotation: null | multi_label: true
{ "abstract": " Emojis, as a new way of conveying nonverbal cues, are widely adopted in\ncomputer-mediated communications. In this paper, first from a message sender\nperspective, we focus on people's motives in using four types of emojis --\npositive, neutral, negative, and non-facial. We compare the willingness levels\nof using these emoji types for seven typical intentions that people usually\napply nonverbal cues for in communication. The results of extensive statistical\nhypothesis tests not only report the popularities of the intentions, but also\nuncover the subtle differences between emoji types in terms of intended uses.\nSecond, from a perspective of message recipients, we further study the\nsentiment effects of emojis, as well as their duplications, on verbal messages.\nDifferent from previous studies in emoji sentiment, we study the sentiments of\nemojis and their contexts as a whole. The experiment results indicate that the\npowers of conveying sentiment are different between four emoji types, and the\nsentiment effects of emojis vary in the contexts of different valences.\n", "title": "Spice up Your Chat: The Intentions and Sentiment Effects of Using Emoji" }
id: 9423 | status: Validated | annotation: [ "Computer Science" ] | multi_label: true
{ "abstract": " We investigate self-shielding of intergalactic hydrogen against ionizing\nradiation in radiative transfer simulations of cosmic reionization carefully\ncalibrated with Lyman alpha forest data. While self-shielded regions manifest\nas Lyman-limit systems in the post-reionization Universe, here we focus on\ntheir evolution during reionization (redshifts z=6-10). At these redshifts, the\nspatial distribution of hydrogen-ionizing radiation is highly inhomogeneous,\nand some regions of the Universe are still neutral. After masking the neutral\nregions and ionizing sources in the simulation, we find that the hydrogen\nphotoionization rate depends on the local hydrogen density in a manner very\nsimilar to that in the post-reionization Universe. The characteristic physical\nhydrogen density above which self-shielding becomes important at these\nredshifts is about $\\mathrm{n_H \\sim 3 \\times 10^{-3} cm^{-3}}$, or $\\sim$ 20\ntimes the mean hydrogen density, reflecting the fact that during reionization\nphotoionization rates are typically low enough that the filaments in the cosmic\nweb are often self-shielded. The value of the typical self-shielding density\ndecreases by a factor of 3 between redshifts z=3 and 10, and follows the\nevolution of the average photoionization rate in ionized regions in a simple\nfashion. We provide a simple parameterization of the photoionization rate as a\nfunction of density in self-shielded regions during the epoch of reionization.\n", "title": "Self-shielding of hydrogen in the IGM during the epoch of reionization" }
id: 9424 | status: Default | annotation: null | multi_label: true
{ "abstract": " We propose a single neural probabilistic model based on variational\nautoencoder that can be conditioned on an arbitrary subset of observed features\nand then sample the remaining features in \"one shot\". The features may be both\nreal-valued and categorical. Training of the model is performed by stochastic\nvariational Bayes. The experimental evaluation on synthetic data, as well as\nfeature imputation and image inpainting problems, shows the effectiveness of\nthe proposed approach and diversity of the generated samples.\n", "title": "Universal Conditional Machine" }
id: 9425 | status: Default | annotation: null | multi_label: true
{ "abstract": " We present a systematic study on higher-order penalty techniques for\nisogeometric mortar methods. In addition to the weak-continuity enforced by a\nmortar method, normal derivatives across the interface are penalized. The\nconsidered applications are fourth order problems as well as eigenvalue\nproblems for second and fourth order equations. The hybrid coupling enables the\ndiscretization of fourth order problems in a multi-patch setting as well as a\nconvenient implementation of natural boundary conditions. For second order\neigenvalue problems, the pollution of the discrete spectrum - typically\nreferred to as 'outliers' - can be avoided.\nNumerical results illustrate the good behaviour of the proposed method in\nsimple systematic studies as well as more complex multi-patch mapped geometries\nfor linear elasticity and Kirchhoff plates.\n", "title": "A hybrid isogeometric approach on multi-patches with applications to Kirchhoff plates and eigenvalue problems" }
id: 9426 | status: Default | annotation: null | multi_label: true
{ "abstract": " This prospective chapter gives our view on the evolution of the study of\ncircumstellar discs within the next 20 years from both observational and\ntheoretical sides. We first present the expected improvements in our knowledge\nof protoplanetary discs as for their masses, sizes, chemistry, the presence of\nplanets as well as the evolutionary processes shaping these discs. We then\nexplore the older debris disc stage and explain what will be learnt concerning\ntheir birth, the intrinsic links between these discs and planets, the hot dust\nand the gas detected around main sequence stars as well as discs around white\ndwarfs.\n", "title": "Circumstellar discs: What will be next?" }
id: 9427 | status: Validated | annotation: [ "Physics" ] | multi_label: true
{ "abstract": " For a nonlinear ordinary differential equation solved with respect to the\nhighest order derivative and rational in the other derivatives and in the\nindependent variable, we devise two algorithms to check if the equation can be\nreduced to a linear one by a point transformation of the dependent and\nindependent variables. The first algorithm is based on a construction of the\nLie point symmetry algebra and on the computation of its derived algebra. The\nsecond algorithm exploits the differential Thomas decomposition and allows not\nonly to test the linearizability, but also to generate a system of nonlinear\npartial differential equations that determines the point transformation and the\ncoefficients of the linearized equation. Both algorithms have been implemented\nin Maple and their application is illustrated using several examples.\n", "title": "Algorithmic Verification of Linearizability for Ordinary Differential Equations" }
id: 9428 | status: Default | annotation: null | multi_label: true
{ "abstract": " This paper presents research on polar cap ionosphere space weather phenomena\nconducted during the European Cooperation in Science and Technology (COST)\naction ES0803 from 2008 to 2012. The main part of the work has been directed\ntoward the study of plasma instabilities and scintillations in association with\ncusp flow channels and polar cap electron density structures/patches,which is\nconsidered as critical knowledge in order to develop forecast models for\nscintillations in the polar cap. We have approached this problem by\nmulti-instrument techniques that comprise the EISCAT Svalbard Radar, SuperDARN\nradars, in-situ rocket, and GPS scintillation measurements. The Discussion\nsection aims to unify the bits and pieces of highly specialized information\nfrom several papers into a generalized picture. The cusp ionosphere appears as\na hot region in GPS scintillation climatology maps. Our results are consistent\nwith the existing view that scintillations in the cusp and the polar cap\nionosphere are mainly due to multi-scale structures generated by instability\nprocesses associated with the cross-polar transport of polar cap patches. We\nhave demonstrated that the SuperDARN convection model can be used to track\nthese patches backward and forward in time. Hence, once a patch has been\ndetected in the cusp inflow region, SuperDARN can be used to forecast its\ndestination in the future. However, the high-density gradient of polar cap\npatches is not the only prerequisite for high-latitude scintillations.\nUnprecedented high resolution rocket measurements reveal that the cusp\nionosphere is associated with filamentary precipitation giving rise to\nkilometer scale gradients onto which the gradient drift instability can operate\nvery efficiently... (continued)\n", "title": "Space weather challenges of the polar cap ionosphere" }
id: 9429 | status: Default | annotation: null | multi_label: true
{ "abstract": " We report a large linear magnetoresistance in Cu$_{2-x}$Te, reaching\n$\\Delta\\rho/\\rho(0)$ = 250\\% at 2 K in a 9 T field. This is observed for\nsamples with $x$ in the range 0.13 to 0.22, and the results are comparable to\nthe effects observed in Ag$_2 X$ materials, although in this case the results\nappear for a much wider range of bulk carrier density. Examining the magnitude\nvs. crossover field from low-field quadratic to high-field linear behavior, we\nshow that models based on classical transport behavior best explain the\nobserved results. The effects are traced to misdirected currents due to\ntopologically inverted behavior in this system, such that stable surface states\nprovide the high mobility transport channels. The resistivity also crosses over\nto a $T^2$ dependence in the temperature range where the large linear MR\nappears, an indicator of electron-electron interaction effects within the\nsurface states. Thus this is an example of a system in which these interactions\ndominate the low-temperature behavior of the surface states.\n", "title": "Unconventional Large Linear Magnetoresistance in Cu$_{2-x}$Te" }
id: 9430 | status: Default | annotation: null | multi_label: true
{ "abstract": " Given samples from an unknown distribution $p$ and a description of a\ndistribution $q$, are $p$ and $q$ close or far? This question of \"identity\ntesting\" has received significant attention in the case of testing whether $p$\nand $q$ are equal or far in total variation distance. However, in recent work,\nthe following questions have been been critical to solving problems at the\nfrontiers of distribution testing:\n-Alternative Distances: Can we test whether $p$ and $q$ are far in other\ndistances, say Hellinger?\n-Tolerance: Can we test when $p$ and $q$ are close, rather than equal? And if\nso, close in which distances?\nMotivated by these questions, we characterize the complexity of distribution\ntesting under a variety of distances, including total variation, $\\ell_2$,\nHellinger, Kullback-Leibler, and $\\chi^2$. For each pair of distances $d_1$ and\n$d_2$, we study the complexity of testing if $p$ and $q$ are close in $d_1$\nversus far in $d_2$, with a focus on identifying which problems allow strongly\nsublinear testers (i.e., those with complexity $O(n^{1 - \\gamma})$ for some\n$\\gamma > 0$ where $n$ is the size of the support of the distributions $p$ and\n$q$). We provide matching upper and lower bounds for each case. We also study\nthese questions in the case where we only have samples from $q$ (equivalence\ntesting), showing qualitative differences from identity testing in terms of\nwhen tolerance can be achieved. Our algorithms fall into the classical paradigm\nof $\\chi^2$-statistics, but require crucial changes to handle the challenges\nintroduced by each distance we consider. Finally, we survey other recent\nresults in an attempt to serve as a reference for the complexity of various\ndistribution testing problems.\n", "title": "Which Distribution Distances are Sublinearly Testable?" }
id: 9431 | status: Default | annotation: null | multi_label: true
{ "abstract": " We study the relaxation dynamics of photo-carriers in the paramagnetic Mott\ninsulating phase of the half-filled two-band Hubbard model. Using\nnonequilibrium dynamical mean field theory, we excite charge carriers across\nthe Mott gap by a short hopping modulation, and simulate the evolution of the\nphoto-doped population within the Hubbard bands. We observe an ultrafast\ncharge-carrier relaxation driven by emission of local spin excitations with an\ninverse relaxation time proportional to the Hund's coupling. The photo-doping\ngenerates additional side-bands in the spectral function, and for strong Hund's\ncoupling, the photo-doped population also splits into several resonances. The\ndynamics of the local many-body states reveals two effects, thermal blocking\nand kinetic freezing, which manifest themselves when the Hund's coupling\nbecomes of the order of the temperature or the bandwidth, respectively. These\neffects, which are absent in the single-band Hubbard model, should be relevant\nfor the interpretation of experiments on correlated materials with multiple\nactive orbitals. In particular, the features revealed in the non-equilibrium\nenergy distribution of the photo-carriers are experimentally accessible, and\nprovide information on the role of the Hund's coupling in these materials.\n", "title": "Hund's coupling driven photo-carrier relaxation in the two-band Mott insulator" }
id: 9432 | status: Validated | annotation: [ "Physics" ] | multi_label: true
{ "abstract": " Two of the most popular modelling paradigms in computer vision are\nfeed-forward neural networks (FFNs) and probabilistic graphical models (GMs).\nVarious connections between the two have been studied in recent works, such as\ne.g. expressing mean-field based inference in a GM as an FFN. This paper\nestablishes a new connection between FFNs and GMs. Our key observation is that\nany FFN implements a certain approximation of a corresponding Bayesian network\n(BN). We characterize various benefits of having this connection. In\nparticular, it results in a new learning algorithm for BNs. We validate the\nproposed methods for a classification problem on CIFAR-10 dataset and for\nbinary image segmentation on Weizmann Horse dataset. We show that statistically\nlearned BNs improve performance, having at the same time essentially better\ngeneralization capability, than their FFN counterparts.\n", "title": "A Connection between Feed-Forward Neural Networks and Probabilistic Graphical Models" }
id: 9433 | status: Default | annotation: null | multi_label: true
{ "abstract": " Let $b \\ge 2$ be an integer. Among other results, we establish, in a\nquantitative form, that any sufficiently large integer which is not a multiple\nof $b$ cannot have simultaneously only few distinct prime factors and only few\nnonzero digits in its representation in base $b$.\n", "title": "On the digital representation of smooth numbers" }
id: 9434 | status: Default | annotation: null | multi_label: true
{ "abstract": " In this paper, we study constraint qualifications for the nonconvex\ninequality defined by a proper lower semicontinuous function. These constraint\nqualifications involve the generalized construction of normal cones and\nsubdifferentials. Several conditions for these constraint qualifications are\nalso provided therein. When restricted to the convex inequality, these\nconstraint qualifications reduce to basic constraint qualification (BCQ) and\nstrong BCQ studied in [SIAM J. Optim., 14(2004), 757-772] and [Math. Oper.\nRes., 30 (2005), 956-965].\n", "title": "On Constraint Qualifications of a Nonconvex Inequality" }
id: 9435 | status: Default | annotation: null | multi_label: true
{ "abstract": " We propose a framework employing stochastic differential equations to\nfacilitate the long-term stability analysis of power grids with intermittent\nwind power generations. This framework takes into account the discrete dynamics\nwhich play a critical role in the long-term stability analysis, incorporates\nthe model of wind speed with different probability distributions, and also\ndevelops an approximation methodology (by a deterministic hybrid model) for the\nstochastic hybrid model to reduce the computational burden brought about by the\nuncertainty of wind power. The theoretical and numerical studies show that a\ndeterministic hybrid model can provide an accurate trajectory approximation and\nstability assessments for the stochastic hybrid model under mild conditions. In\naddition, we discuss the critical cases that the deterministic hybrid model\nfails and discover that these cases are caused by a violation of the proposed\nsufficient conditions. Such discussion complements the proposed framework and\nmethodology and also reaffirms the importance of the stochastic hybrid model\nwhen the system operates close to its stability limit.\n", "title": "A Framework for Dynamic Stability Analysis of Power Systems with Volatile Wind Power" }
id: 9436 | status: Default | annotation: null | multi_label: true
{ "abstract": " Kimura and Yoshida treated a model in which the finite variation part of a\ntwo-dimensional semimartingale is expressed by time-integration of latent\nprocesses. They proposed a correlation estimator between the latent processes\nand proved its consistency and asymptotic mixed normality. In this paper, we\ndiscuss the confidence interval of the correlation estimator to detect the\ncorrelation. %between latent processes. We propose two types of estimators for\nasymptotic variance of the correlation estimator and prove their consistency in\na high frequency setting. Our model includes doubly stochastic Poisson\nprocesses whose intensity processes are correlated Itô processes. We compare\nour estimators based on the simulation of the doubly stochastic Poisson\nprocesses.\n", "title": "Confidence interval for correlation estimator between latent processes" }
id: 9437 | status: Validated | annotation: [ "Mathematics", "Statistics" ] | multi_label: true
{ "abstract": " Liquid scintillators are a common choice for neutrino physics experiments,\nbut their capabilities to perform background rejection by scintillation pulse\nshape discrimination is generally limited in large detectors. This paper\ndescribes a novel approach for a pulse shape based event classification\ndeveloped in the context of the Double Chooz reactor antineutrino experiment.\nUnlike previous implementations, this method uses the Fourier power spectra of\nthe scintillation pulse shapes to obtain event-wise information. A\nclassification variable built from spectral information was able to achieve an\nunprecedented performance, despite the lack of optimization at the detector\ndesign level. Several examples of event classification are provided, ranging\nfrom differentiation between the detector volumes and an efficient rejection of\ninstrumental light noise, to some sensitivity to the particle type, such as\nstopping muons, ortho-positronium formation, alpha particles as well as\nelectrons and positrons. In combination with other techniques the method is\nexpected to allow for a versatile and more efficient background rejection in\nthe future, especially if detector optimization is taken into account at the\ndesign level.\n", "title": "Novel event classification based on spectral analysis of scintillation waveforms in Double Chooz" }
id: 9438 | status: Validated | annotation: [ "Physics" ] | multi_label: true
{ "abstract": " We consider the Lie group PSL(2) (the group of orientation preserving\nisometries of the hyperbolic plane) and a left-invariant Riemannian metric on\nthis group with two equal eigenvalues that correspond to space-like\neigenvectors (with respect to the Killing form). For such metrics we find a\nparametrization of geodesics, the conjugate time, the cut time and the cut\nlocus. The injectivity radius is computed. We show that the cut time and the\ncut locus in such Riemannian problem converge to the cut time and the cut locus\nin the corresponding sub-Riemannian problem as the third eigenvalue of the\nmetric tends to infinity. Similar results are also obtained for SL(2).\n", "title": "Symmetric Riemannian problem on the group of proper isometries of hyperbolic plane" }
id: 9439 | status: Default | annotation: null | multi_label: true
{ "abstract": " Relation extraction is a fundamental task in information extraction. Most\nexisting methods have heavy reliance on annotations labeled by human experts,\nwhich are costly and time-consuming. To overcome this drawback, we propose a\nnovel framework, REHession, to conduct relation extractor learning using\nannotations from heterogeneous information source, e.g., knowledge base and\ndomain heuristics. These annotations, referred as heterogeneous supervision,\noften conflict with each other, which brings a new challenge to the original\nrelation extraction task: how to infer the true label from noisy labels for a\ngiven instance. Identifying context information as the backbone of both\nrelation extraction and true label discovery, we adopt embedding techniques to\nlearn the distributed representations of context, which bridges all components\nwith mutual enhancement in an iterative fashion. Extensive experimental results\ndemonstrate the superiority of REHession over the state-of-the-art.\n", "title": "Heterogeneous Supervision for Relation Extraction: A Representation Learning Approach" }
id: 9440 | status: Validated | annotation: [ "Computer Science" ] | multi_label: true
{ "abstract": " The anelastic and pseudo-incompressible equations are two well-known\nsoundproof approximations of compressible flows useful for both theoretical and\nnumerical analysis in meteorology, atmospheric science, and ocean studies. In\nthis paper, we derive and test structure-preserving numerical schemes for these\ntwo systems. The derivations are based on a discrete version of the\nEuler-Poincaré variational method. This approach relies on a finite\ndimensional approximation of the (Lie) group of diffeomorphisms that preserve\nweighted-volume forms. These weights describe the background stratification of\nthe fluid and correspond to the weighed velocity fields for anelastic and\npseudo-incompressible approximations. In particular, we identify to these\ndiscrete Lie group configurations the associated Lie algebras such that\nelements of the latter correspond to weighted velocity fields that satisfy the\ndivergence-free conditions for both systems. Defining discrete Lagrangians in\nterms of these Lie algebras, the discrete equations follow by means of\nvariational principles. Descending from variational principles, the schemes\nexhibit further a discrete version of Kelvin circulation theorem, are\napplicable to irregular meshes, and show excellent long term energy behavior.\nWe illustrate the properties of the schemes by performing preliminary test\ncases.\n", "title": "Variational integrators for anelastic and pseudo-incompressible flows" }
id: 9441 | status: Default | annotation: null | multi_label: true
{ "abstract": " Bayesian optimization is a sample-efficient approach to solving global\noptimization problems. Along with a surrogate model, this approach relies on\ntheoretically motivated value heuristics (acquisition functions) to guide the\nsearch process. Maximizing acquisition functions yields the best performance;\nunfortunately, this ideal is difficult to achieve since optimizing acquisition\nfunctions per se is frequently non-trivial. This statement is especially true\nin the parallel setting, where acquisition functions are routinely non-convex,\nhigh-dimensional, and intractable. Here, we demonstrate how many popular\nacquisition functions can be formulated as Gaussian integrals amenable to the\nreparameterization trick and, ensuingly, gradient-based optimization. Further,\nwe use this reparameterized representation to derive an efficient Monte Carlo\nestimator for the upper confidence bound acquisition function in the context of\nparallel selection.\n", "title": "The reparameterization trick for acquisition functions" }
id: 9442 | status: Validated | annotation: [ "Computer Science", "Statistics" ] | multi_label: true
{ "abstract": " When making predictions about ecosystems, we often have available a number of\ndifferent ecosystem models that attempt to represent their dynamics in a\ndetailed mechanistic way. Each of these can be used as simulators of\nlarge-scale experiments and make forecasts about the fate of ecosystems under\ndifferent scenarios in order to support the development of appropriate\nmanagement strategies. However, structural differences, systematic\ndiscrepancies and uncertainties lead to different models giving different\npredictions under these scenarios. This is further complicated by the fact that\nthe models may not be run with the same species or functional groups, spatial\nstructure or time scale. Rather than simply trying to select a 'best' model, or\ntaking some weighted average, it is important to exploit the strengths of each\nof the available models, while learning from the differences between them. To\nachieve this, we construct a flexible statistical model of the relationships\nbetween a collection or 'ensemble' of mechanistic models and their biases,\nallowing for structural and parameter uncertainty and for different ways of\nrepresenting reality. Using this statistical meta-model, we can combine prior\nbeliefs, model estimates and direct observations using Bayesian methods, and\nmake coherent predictions of future outcomes under different scenarios with\nrobust measures of uncertainty. In this paper we present the modelling\nframework and discuss results obtained using a diverse ensemble of models in\nscenarios involving future changes in fishing levels. These examples illustrate\nthe value of our approach in predicting outcomes for possible strategies\npertaining to climate and fisheries policy aimed at improving food security and\nmaintaining ecosystem integrity.\n", "title": "Multi-model ensembles for ecosystem prediction" }
id: 9443 | status: Default | annotation: null | multi_label: true
{ "abstract": " We consider longitudinal nonlinear atomic vibrations in uniformly strained\ncarbon chains with the cumulene structure ($=C=C=)_{n}$. With the aid of ab\ninitio simulations, based on the density functional theory, we have revealed\nthe phenomenon of the $\\pi$-mode softening in a certain range of its amplitude\nfor the strain above the critical value $\\eta_{c}\\approx 11\\,{\\%}$.\nCondensation of this soft mode induces the structural transformation of the\ncarbon chain with doubling of its unit cell. This is the Peierls phase\ntransition in the strained cumulene, which was previously revealed in [Nano\nLett. 14, 4224 (2014)]. The Peierls transition leads to appearance of the\nenergy gap in the electron spectrum of the strained carbyne, and this material\ntransforms from the conducting state to semiconducting or insulating states.\nThe authors of the above paper emphasize that such phenomenon can be used for\nconstruction of various nanodevices. The $\\pi$-mode softening occurs because\nthe old equilibrium positions (EQPs), around which carbon atoms vibrate at\nsmall strains, lose their stability and these atoms begin to vibrate in the new\npotential wells located near old EQPs. We study the stability of the new EQPs,\nas well as stability of vibrations in their vicinity. In previous paper\n[Physica D 203, 121(2005)], we proved that only three symmetry-determined\nRosenberg nonlinear normal modes can exist in monoatomic chains with arbitrary\ninterparticle interactions. They are the above-discussed $\\pi$-mode and two\nother modes, which we call $\\sigma$-mode and $\\tau$-mode. These modes\ncorrespond to the multiplication of the unit cell of the vibrational state by\ntwo, three or four times compared to that of the equilibrium state. We study\nproperties of these modes in the chain model with arbitrary pair potential of\ninterparticle interactions.\n", "title": "Nonlinear atomic vibrations and structural phase transitions in strained carbon chains" }
id: 9444 | status: Validated | annotation: [ "Physics" ] | multi_label: true
{ "abstract": " For a smooth manifold $M$, possibly with boundary and corners, and a Lie\ngroup $G$, we consider a suitable description of gauge fields in terms of\nparallel transport, as groupoid homomorphisms from a certain path groupoid in\n$M$ to $G$. Using a cotriangulation $\\mathscr{C}$ of $M$, and collections of\nfinite-dimensional families of paths relative to $\\mathscr{C}$, we define a\nhomotopical equivalence relation of parallel transport maps, leading to the\nconcept of an extended lattice gauge (ELG) field. A lattice gauge field, as\nused in Lattice Gauge Theory, is part of the data contained in an ELG field,\nbut the latter contains further local topological information sufficient to\nreconstruct a principal $G$-bundle on $M$ up to equivalence. The space of ELG\nfields of a given pair $(M,\\mathscr{C})$ is a covering for the space of fields\nin Lattice Gauge Theory, whose connected components parametrize equivalence\nclasses of principal $G$-bundles on $M$. We give a criterion to determine when\nELG fields over different cotriangulations define equivalent bundles.\n", "title": "Homotopy classes of gauge fields and the lattice" }
id: 9445 | status: Default | annotation: null | multi_label: true
{ "abstract": " A cyclic proof system, called CLKID-omega, gives us another way of\nrepresenting inductive definitions and effcient proof search. The 2011 paper by\nBrotherston and Simpson showed that the provability of CLKID-omega includes the\nprovability of Martin-Lof's system of inductive definitions, called LKID, and\nconjectured the equivalence. Since then, the equivalence has been left an open\nquestion. This paper shows that CLKID-omega and LKID are indeed not equivalent.\nThis paper considers a statement called 2-Hydra in these two systems with the\nfirst-order language formed by 0, the successor, the natural number predicate,\nand a binary predicate symbol used to express 2-Hydra. This paper shows that\nthe 2-Hydra statement is provable in CLKID-omega, but the statement is not\nprovable in LKID, by constructing some Henkin model where the statement is\nfalse.\n", "title": "Classical System of Martin-Lof's Inductive Definitions is not Equivalent to Cyclic Proofs" }
null
null
null
null
true
null
9446
null
Default
null
null
null
{ "abstract": " Let $\\theta$ be an inner function on the unit disk, and let\n$K^p_\\theta:=H^p\\cap\\theta\\overline{H^p_0}$ be the associated star-invariant\nsubspace of the Hardy space $H^p$, with $p\\ge1$. While a nontrivial function\n$f\\in K^p_\\theta$ is never divisible by $\\theta$, it may have a factor $h$\nwhich is \"not too different\" from $\\theta$ in the sense that the ratio\n$h/\\theta$ (or just the anti-analytic part thereof) is smooth on the circle. In\nthis case, $f$ is shown to have additional integrability and/or smoothness\nproperties, much in the spirit of the Hardy--Littlewood--Sobolev embedding\ntheorem. The appropriate norm estimates are established, and their sharpness is\ndiscussed.\n", "title": "Factorization and non-factorization theorems for pseudocontinuable functions" }
null
null
null
null
true
null
9447
null
Default
null
null
null
{ "abstract": " Web request query strings (queries), which pass parameters to the referenced\nresource, are always manipulated by attackers to retrieve sensitive data and\neven take full control of victim web servers and web applications. However,\nexisting malicious query detection approaches in the current literature cannot\ncope with changing web attacks with constant detection models. In this paper,\nwe propose AMODS, an adaptive system that periodically updates the detection\nmodel to detect the latest unknown attacks. We also propose an adaptive\nlearning strategy, called SVM HYBRID, leveraged by our system to minimize\nmanual work. In the evaluation, an up-to-date detection model is trained on a\nten-day query dataset collected from an academic institute's web server logs.\nOur system outperforms existing web attack detection methods, with an F-value\nof 94.79% and FP rate of 0.09%. The total number of malicious queries obtained\nby SVM HYBRID is 2.78 times that by the popular Support Vector Machine Adaptive\nLearning (SVM AL) method. The malicious queries obtained can be used to update\nthe Web Application Firewall (WAF) signature library.\n", "title": "Adaptively Detecting Malicious Queries in Web Attacks" }
null
null
null
null
true
null
9448
null
Default
null
null
null
{ "abstract": " We introduce signature payoffs, a family of path-dependent derivatives that\nare given in terms of the signature of the price path of the underlying asset.\nWe show that these derivatives are dense in the space of continuous payoffs, a\nresult that is exploited to quickly price arbitrary continuous payoffs. This\napproach to pricing derivatives is then tested with European options, American\noptions, Asian options, lookback options and variance swaps. As we show,\nsignature payoffs can be used to price these derivatives with very high\naccuracy.\n", "title": "Derivatives pricing using signature payoffs" }
null
null
null
null
true
null
9449
null
Default
null
null
null
{ "abstract": " We present deep ALMA CO(5-4) observations of a main sequence, clumpy galaxy\nat z=1.5 in the HUDF. Thanks to the ~0.5\" resolution of the ALMA data, we can\nlink stellar population properties to the CO(5-4) emission on scales of a few\nkpc. We detect strong CO(5-4) emission from the nuclear region of the galaxy,\nconsistent with the observed $L_{\\rm IR}$-$L^{\\prime}_{\\rm CO(5-4)}$\ncorrelation and indicating on-going nuclear star formation. The CO(5-4) gas\ncomponent appears more concentrated than other star formation tracers or the\ndust distribution in this galaxy. We discuss possible implications of this\ndifference in terms of star formation efficiency and mass build-up at the\ngalaxy centre. Conversely, we do not detect any CO(5-4) emission from the\nUV-bright clumps. This might imply that clumps have a high star formation\nefficiency (although they do not display unusually high specific star formation\nrates) and are not entirely gas dominated, with gas fractions no larger than\nthat of their host galaxy (~50%). Stellar feedback and disk instability torques\nfunnelling gas towards the galaxy centre could contribute to the relatively low\ngas content. Alternatively, clumps could fall in a more standard star formation\nefficiency regime if their actual star-formation rates are lower than generally\nassumed. We find that clump star-formation rates derived with several\ndifferent, plausible methods can vary by up to an order of magnitude. The\nlowest estimates would be compatible with a CO(5-4) non-detection even for\nmain-sequence like values of star formation efficiency and gas content.\n", "title": "ALMA constraints on star-forming gas in a prototypical z=1.5 clumpy galaxy: the dearth of CO(5-4) emission from UV-bright clumps" }
null
null
null
null
true
null
9450
null
Default
null
null
null
{ "abstract": " In this paper, we demonstrate a new data-driven framework for real-time\nneutral density estimation via model-data fusion in quasi-physical\nionosphere-thermosphere models. The framework has two main components: (i) the\ndevelopment of a quasi-physical dynamic reduced order model (ROM) that uses a\nlinear approximation of the underlying dynamics and effect of the drivers, and\n(ii) dynamic calibration of the ROM through estimation of the ROM coefficients\nthat represent the model parameters. We have previously demonstrated the\ndevelopment of a quasi-physical ROM using simulation output from a physical\nmodel and assimilation of non-operational density estimates derived from\naccelerometer measurements along a single orbit. In this paper, we demonstrate\nthe potential of the framework for use with operational measurements. We use\nsimulated GPS-derived orbit ephemerides with 5 minute resolution as\nmeasurements. The framework is a first of its kind, simple yet robust and\naccurate method with high potential for providing real-time operational updates\nto the state of the upper atmosphere using quasi-physical models with inherent\nforecasting/predictive capabilities.\n", "title": "Data-driven framework for real-time thermospheric density estimation" }
null
null
null
null
true
null
9451
null
Default
null
null
null
{ "abstract": " Turbulent mixing of chemical elements by convection has fundamental effects\non the evolution of stars. The standard algorithm at present, mixing-length\ntheory (MLT), is intrinsically local, and must be supplemented by extensions\nwith adjustable parameters. As a step toward reducing this arbitrariness, we\ncompare asteroseismically inferred internal structures of two Kepler slowly\npulsating B stars (SPB's; $M\\sim 3.25 M_\\odot$) to predictions of 321D\nturbulence theory, based upon well-resolved, truly turbulent three-dimensional\nsimulations (Arnett , et al. 2015, Christini, et al. 2016) which include\nboundary physics absent from MLT. We find promising agreement between the\nsteepness and shapes of the theoretically-predicted composition profile outside\nthe convective region in 3D simulations and in asteroseismically constrained\ncomposition profiles in the best 1D models of the two SPBs. The structure and\nmotion of the boundary layer, and the generation of waves, are discussed.\n", "title": "Synergies between Asteroseismology and Three-dimensional Simulations of Stellar Turbulence" }
null
null
null
null
true
null
9452
null
Default
null
null
null
{ "abstract": " Duke, Imamoglu, and Toth constructed a polyharmonic Maass form of level 4\nwhose Fourier coefficients encode real quadratic class numbers. A more general\nconstruction of such forms was subsequently given by Bruinier, Funke, and\nImamoglu. Here we give a direct construction of such a form for the full\nmodular group and study the properties of its coefficients. We give\ninterpretations of the coefficients of the holomorphic parts of each of these\npolyharmonic Maass forms as inner products of certain weakly holomorphic\nmodular forms and harmonic Maass forms. The coefficients of square index are\nparticularly intractable; in order to address these, we develop various\nextensions of the usual normalized Peterson inner product using a strategy of\nBringmann, Ehlen and Diamantis.\n", "title": "A polyharmonic Maass form of depth 3/2 for SL_2(Z)" }
null
null
null
null
true
null
9453
null
Default
null
null
null
{ "abstract": " We consider a system of $R$ cubic forms in $n$ variables, with integer\ncoefficients, which define a smooth complete intersection in projective space.\nProvided $n\\geq 25R$, we prove an asymptotic formula for the number of integer\npoints in an expanding box at which these forms simultaneously vanish. In\nparticular we can handle systems of forms in $O(R)$ variables, previous work\nhaving required that $n \\gg R^2$. One conjectures that $n \\geq 6R+1$ should be\nsufficient. We reduce the problem to an upper bound for the number of solutions\nto a certain auxiliary inequality. To prove this bound we adapt a method of\nDavenport.\n", "title": "Systems of cubic forms in many variables" }
null
null
null
null
true
null
9454
null
Default
null
null
null
{ "abstract": " Self-supported electrocatalysts being generated and employed directly as\nelectrode for energy conversion has been intensively pursued in the fields of\nmaterials chemistry and energy. Herein, we report a synthetic strategy to\nprepare freestanding hierarchically structured, nitrogen-doped nanoporous\ngraphitic carbon membranes functionalized with Janus-type Co/CoP nanocrystals\n(termed as HNDCM-Co/CoP), which were successfully applied as a\nhighly-efficient, binder-free electrode in hydrogen evolution reaction (HER).\nBenefited from multiple structural merits, such as high degree of\ngraphitization, three-dimensionally interconnected micro-/meso-/macropores,\nuniform nitrogen-doping, well-dispersed Co/CoP nanocrystals as well as the\nconfinement effect of the thin carbon layer on the nanocrystals, HNDCM-Co/CoP\nexhibited superior electrocatalytic activity and long-term operation stability\nfor HER under both acid and alkaline conditions. As a proof-of-concept of\npractical usage, a macroscopic piece of HNDCM-Co/CoP of 5.6 cm x 4 cm x 60 um\nin size was prepared in our laboratory. Driven by a solar cell,\nelectroreduction of water in alkaline condition (pH 14) was performed, and H2\nhas been produced at a rate of 16 ml/min, demonstrating its potential as\nreal-life energy conversion systems.\n", "title": "Nitrogen-doped Nanoporous Carbon Membranes Functionalized with Co/CoP Janus-type nanocrystals as Hydrogen Evolution Electrode in Both Acid and Alkaline Environment" }
null
null
null
null
true
null
9455
null
Default
null
null
null
{ "abstract": " With the exponential growth of cyber-physical systems (CPS), new security\nchallenges have emerged. Various vulnerabilities, threats, attacks, and\ncontrols have been introduced for the new generation of CPS. However, there\nlack a systematic study of CPS security issues. In particular, the\nheterogeneity of CPS components and the diversity of CPS systems have made it\nvery difficult to study the problem with one generalized model.\nIn this paper, we capture and systematize existing research on CPS security\nunder a unified framework. The framework consists of three orthogonal\ncoordinates: (1) from the \\emph{security} perspective, we follow the well-known\ntaxonomy of threats, vulnerabilities, attacks and controls; (2)from the\n\\emph{CPS components} perspective, we focus on cyber, physical, and\ncyber-physical components; and (3) from the \\emph{CPS systems} perspective, we\nexplore general CPS features as well as representative systems (e.g., smart\ngrids, medical CPS and smart cars). The model can be both abstract to show\ngeneral interactions of a CPS application and specific to capture any details\nwhen needed. By doing so, we aim to build a model that is abstract enough to be\napplicable to various heterogeneous CPS applications; and to gain a modular\nview of the tightly coupled CPS components. Such abstract decoupling makes it\npossible to gain a systematic understanding of CPS security, and to highlight\nthe potential sources of attacks and ways of protection.\n", "title": "Cyber-Physical Systems Security -- A Survey" }
null
null
null
null
true
null
9456
null
Default
null
null
null
{ "abstract": " In the standard web browser programming model, third-party scripts included\nin an application execute with the same privilege as the application's own\ncode. This leaves the application's confidential data vulnerable to theft and\nleakage by malicious code and inadvertent bugs in the third-party scripts.\nSecurity mechanisms in modern browsers (the same-origin policy, cross-origin\nresource sharing and content security policies) are too coarse to suit this\nprogramming model. All these mechanisms (and their extensions) describe whether\nor not a script can access certain data, whereas the meaningful requirement is\nto allow untrusted scripts access to confidential data that they need and to\nprevent the scripts from leaking data on the side. Motivated by this gap, we\npropose WebPol, a policy mechanism that allows a website developer to include\nfine-grained policies on confidential application data in the familiar syntax\nof the JavaScript programming language. The policies can be associated with any\nwebpage element, and specify what aspects of the element can be accessed by\nwhich third-party domains. A script can access data that the policy allows it\nto, but it cannot pass the data (or data derived from it) to other scripts or\nremote hosts in contravention of the policy. To specify the policies, we expose\na small set of new native APIs in JavaScript. Our policies can be enforced\nusing any of the numerous existing proposals for information flow tracking in\nweb browsers. We have integrated our policies into one such proposal that we\nuse to evaluate performance overheads and to test our examples.\n", "title": "WebPol: Fine-grained Information Flow Policies for Web Browsers" }
null
null
null
null
true
null
9457
null
Default
null
null
null
{ "abstract": " The pseudo-marginal algorithm is a variant of the Metropolis-Hastings\nalgorithm which samples asymptotically from a probability distribution when it\nis only possible to estimate unbiasedly an unnormalized version of its density.\nPractically, one has to trade-off the computational resources used to obtain\nthis estimator against the asymptotic variances of the ergodic averages\nobtained by the pseudo-marginal algorithm. Recent works optimizing this\ntrade-off rely on some strong assumptions which can cast doubts over their\npractical relevance. In particular, they all assume that the distribution of\nthe additive error in the log-likelihood estimator is independent of the\nparameter value at which it is evaluated. Under weak regularity conditions we\nshow here that, as the number of data points tends to infinity, a\nspace-rescaled version of the pseudo-marginal chain converges weakly towards\nanother pseudo-marginal chain for which this assumption indeed holds. A study\nof this limiting chain allows us to provide parameter dimension-dependent\nguidelines on how to optimally scale a normal random walk proposal and the\nnumber of Monte Carlo samples for the pseudo-marginal method in the large\nsample regime. This complements and validates currently available results.\n", "title": "Large Sample Asymptotics of the Pseudo-Marginal Method" }
null
null
null
null
true
null
9458
null
Default
null
null
null
{ "abstract": " The present contribution investigates the dynamics generated by the\ntwo-dimensional Vlasov-Poisson-Fokker-Planck equation for charged particles in\na steady inhomogeneous background of opposite charges. We provide global in\ntime estimates that are uniform with respect to initial data taken in a bounded\nset of a weighted $L^2$ space, and where dependencies on the mean-free path\n$\\tau$ and the Debye length $\\delta$ are made explicit. In our analysis the\nmean free path covers the full range of possible values: from the regime of\nevanescent collisions $\\tau\\to\\infty$ to the strongly collisional regime\n$\\tau\\to0$. As a counterpart, the largeness of the Debye length, that enforces\na weakly nonlinear regime, is used to close our nonlinear estimates.\nAccordingly we pay a special attention to relax as much as possible the\n$\\tau$-dependent constraint on $\\delta$ ensuring exponential decay with\nexplicit $\\tau$-dependent rates towards the stationary solution. In the\nstrongly collisional limit $\\tau\\to0$, we also examine all possible asymptotic\nregimes selected by a choice of observation time scale. Here also, our emphasis\nis on strong convergence, uniformity with respect to time and to initial data\nin bounded sets of a $L^2$ space. Our proofs rely on a detailed study of the\nnonlinear elliptic equation defining stationary solutions and a careful\ntracking and optimization of parameter dependencies of\nhypocoercive/hypoelliptic estimates.\n", "title": "Large-time behavior of solutions to Vlasov-Poisson-Fokker-Planck equations: from evanescent collisions to diffusive limit" }
null
null
null
null
true
null
9459
null
Default
null
null
null
{ "abstract": " Failure rates in high performance computers rapidly increase due to the\ngrowth in system size and complexity. Hence, failures became the norm rather\nthan the exception. Different approaches on high performance computing (HPC)\nsystems have been introduced, to prevent failures (e. g., redundancy) or at\nleast minimize their impacts (e. g., checkpoint and restart). In most cases,\nwhen these approaches are employed to increase the resilience of certain parts\nof a system, energy consumption rapidly increases, or performance significantly\ndegrades. To address this challenge, we propose on-demand resilience as an\napproach to achieve adaptive resilience in HPC systems. In this work, the HPC\nsystem is considered in its entirety and resilience mechanisms such as\ncheckpointing, isolation, and migration, are activated on-demand. Using the\nproposed approach, the unavoidable increase in total energy consumption and\nsystem performance degradation is decreased compared to the typical\ncheckpoint/restart and redundant resilience mechanisms. Our work aims to\nmitigate a large number of failures occurring at various layers in the system,\nto prevent their propagation, and to minimize their impact, all of this in an\nenergy-saving manner. In the case of failures that are estimated to occur but\ncannot be mitigated using the proposed on-demand resilience approach, the\nsystem administrators will be notified in view of performing further\ninvestigations into the causes of these failures and their impacts.\n", "title": "Towards Adaptive Resilience in High Performance Computing" }
null
null
null
null
true
null
9460
null
Default
null
null
null
{ "abstract": " It has long been assumed that high dimensional continuous control problems\ncannot be solved effectively by discretizing individual dimensions of the\naction space due to the exponentially large number of bins over which policies\nwould have to be learned. In this paper, we draw inspiration from the recent\nsuccess of sequence-to-sequence models for structured prediction problems to\ndevelop policies over discretized spaces. Central to this method is the\nrealization that complex functions over high dimensional spaces can be modeled\nby neural networks that predict one dimension at a time. Specifically, we show\nhow Q-values and policies over continuous spaces can be modeled using a next\nstep prediction model over discretized dimensions. With this parameterization,\nit is possible to both leverage the compositional structure of action spaces\nduring learning, as well as compute maxima over action spaces (approximately).\nOn a simple example task we demonstrate empirically that our method can perform\nglobal search, which effectively gets around the local optimization issues that\nplague DDPG. We apply the technique to off-policy (Q-learning) methods and show\nthat our method can achieve the state-of-the-art for off-policy methods on\nseveral continuous control tasks.\n", "title": "Discrete Sequential Prediction of Continuous Actions for Deep RL" }
null
null
null
null
true
null
9461
null
Default
null
null
null
{ "abstract": " Sampling errors in nested sampling parameter estimation differ from those in\nBayesian evidence calculation, but have been little studied in the literature.\nThis paper provides the first explanation of the two main sources of sampling\nerrors in nested sampling parameter estimation, and presents a new diagrammatic\nrepresentation for the process. We find no current method can accurately\nmeasure the parameter estimation errors of a single nested sampling run, and\npropose a method for doing so using a new algorithm for dividing nested\nsampling runs. We empirically verify our conclusions and the accuracy of our\nnew method.\n", "title": "Sampling Errors in Nested Sampling Parameter Estimation" }
null
null
null
null
true
null
9462
null
Default
null
null
null
{ "abstract": " This paper explores the design and development of a class of robust\ndiver-following algorithms for autonomous underwater robots. By considering the\noperational challenges for underwater visual tracking in diverse real-world\nsettings, we formulate a set of desired features of a generic diver following\nalgorithm. We attempt to accommodate these features and maximize general\ntracking performance by exploiting the state-of-the-art deep object detection\nmodels. We fine-tune the building blocks of these models with a goal of\nbalancing the trade-off between robustness and efficiency in an onboard setting\nunder real-time constraints. Subsequently, we design an architecturally simple\nConvolutional Neural Network (CNN)-based diver-detection model that is much\nfaster than the state-of-the-art deep models yet provides comparable detection\nperformances. In addition, we validate the performance and effectiveness of the\nproposed diver-following modules through a number of field experiments in\nclosed-water and open-water environments.\n", "title": "Towards a Generic Diver-Following Algorithm: Balancing Robustness and Efficiency in Deep Visual Detection" }
null
null
null
null
true
null
9463
null
Default
null
null
null
{ "abstract": " Bitcoin and its underlying technology Blockchain have become popular in\nrecent years. Designed to facilitate a secure distributed platform without\ncentral authorities, Blockchain is heralded as a paradigm that will be as\npowerful as Big Data, Cloud Computing and Machine learning. Blockchain\nincorporates novel ideas from various fields such as public key encryption and\ndistributed systems. As such, a reader often comes across resources that\nexplain the Blockchain technology from a certain perspective only, leaving the\nreader with more questions than before. We will offer a holistic view on\nBlockchain. Starting with a brief history, we will give the building blocks of\nBlockchain, and explain their interactions. As graph mining has become a major\npart its analysis, we will elaborate on graph theoretical aspects of the\nBlockchain technology. We also devote a section to the future of Blockchain and\nexplain how extensions like Smart Contracts and De-centralized Autonomous\nOrganizations will function. Without assuming any reader expertise, our aim is\nto provide a concise but complete description of the Blockchain technology.\n", "title": "Blockchain: A Graph Primer" }
null
null
null
null
true
null
9464
null
Default
null
null
null
{ "abstract": " Results are presented of direct numerical simulations of incompressible,\nhomogeneous magnetohydrodynamic turbulence without a mean magnetic field,\nsubject to different mechanical forcing functions commonly used in the\nliterature. Specifically, the forces are negative damping (which uses the\nlarge-scale velocity field as a forcing function), a nonhelical random force,\nand a nonhelical static sinusoidal force (analogous to helical ABC forcing).\nThe time evolution of the three ideal invariants (energy, magnetic helicity and\ncross helicity), the time-averaged energy spectra, the energy ratios and the\ndissipation ratios are examined. All three forcing functions produce\nqualitatively similar steady states with regards to the time evolution of the\nenergy and magnetic helicity. However, differences in the cross helicity\nevolution are observed, particularly in the case of the static sinusoidal\nmethod of energy injection. Indeed, an ensemble of sinusoidally-forced\nsimulations with identical parameters shows significant variations in the cross\nhelicity over long time periods, casting some doubt on the validity of the\nprinciple of ergodicity in systems in which the injection of helicity cannot be\ncontrolled. Cross helicity can unexpectedly enter the system through the\nforcing function and must be carefully monitored.\n", "title": "Comparison of forcing functions in magnetohydrodynamic turbulence" }
null
null
null
null
true
null
9465
null
Default
null
null
null
{ "abstract": " The integration of large-scale renewable generation has major implications on\nthe operation of power systems, two of which we address in this paper. First,\nsystem operators have to deal with higher degrees of uncertainty. Second, with\nabundant potential of renewable generation in remote locations, they need to\nincorporate the operation of High Voltage Direct Current lines (HVDC). This\npaper introduces an optimization tool that addresses both challenges by\nincorporating; the full AC power flow equations and chance constraints to\naddress the uncertainty of renewable infeed, HVDC modeling for point-to-point\nlines, and optimizing generator and HVDC corrective control policies in\nreaction to uncertainty. The main contributions are twofold. First, we\nintroduce a HVDC line model and the corresponding HVDC participation factors in\na chance-constrained AC-OPF framework. Second, we modify an existing algorithm\nfor solving the chance-constrained AC optimal power flow to allow for\noptimization of the generation and HVDC participation factors. Using realistic\nwind forecast data, and a 10 bus system with one HVDC line and two wind farms,\nwe demonstrate the performance of our algorithm and show the benefit of\ncontrollability.\n", "title": "Chance-Constrained AC Optimal Power Flow Integrating HVDC Lines and Controllability" }
null
null
null
null
true
null
9466
null
Default
null
null
null
{ "abstract": " Recent results on supercomputers show that beyond 65K cores, the efficiency\nof molecular dynamics simulations of interfacial systems decreases\nsignificantly. In this paper, we introduce a dynamic cutoff method (DCM) for\ninterfacial systems of arbitrarily large size. The idea consists in adopting a\ncutoff-based method in which the cutoff is cho- sen on a particle-by-particle\nbasis, according to the distance from the interface. Computationally, the\nchallenge is shifted from the long-range solvers to the detection of the\ninterfaces and to the computation of the particle-interface distances. For\nthese tasks, we present linear-time algorithms that do not rely on global\ncommunication patterns. As a result, the DCM algorithm is suited for large\nsystems of particles and mas- sively parallel computers. To demonstrate its\npotential, we integrated DCM into the LAMMPS open-source molecular dynamics\npackage, and simulated large liquid/vapor systems on two supercomputers:\nSuperMuc and JUQUEEN. In all cases, the accuracy of DCM is comparable to the\ntraditional particle-particle particle-mesh (PPPM) algorithm, while the\nperformance is considerably superior for large numbers of particles. For\nJUQUEEN, we provide timings for simulations running on the full system (458,\n752 cores), and show nearly perfect strong and weak scaling.\n", "title": "A Scalable, Linear-Time Dynamic Cutoff Algorithm for Molecular Dynamics" }
null
null
null
null
true
null
9467
null
Default
null
null
null
{ "abstract": " Aims: Density waves are often considered as the triggering mechanism of star\nformation in spiral galaxies. Our aim is to study relations between different\nstar formation tracers (stellar UV and near-IR radiation and emission from HI,\nCO and cold dust) in the spiral arms of M31, to calculate stability conditions\nin the galaxy disc and to draw conclusions about possible star formation\ntriggering mechanisms.\nMethods: We select fourteen spiral arm segments from the de-projected data\nmaps and compare emission distributions along the cross sections of the\nsegments in different datasets to each other, in order to detect spatial\noffsets between young stellar populations and the star forming medium. By using\nthe disc stability condition as a function of perturbation wavelength and\ndistance from the galaxy centre we calculate the effective disc stability\nparameters and the least stable wavelengths at different distances. For this we\nutilise a mass distribution model of M31 with four disc components (old and\nyoung stellar discs, cold and warm gaseous discs) embedded within the external\npotential of the bulge, the stellar halo and the dark matter halo. Each\ncomponent is considered to have a realistic finite thickness.\nResults: No systematic offsets between the observed UV and CO/far-IR emission\nacross the spiral segments are detected. The calculated effective stability\nparameter has a minimal value Q_{eff} ~ 1.8 at galactocentric distances 12 - 13\nkpc. The least stable wavelengths are rather long, with the minimal values\nstarting from ~ 3 kpc at distances R > 11 kpc.\nConclusions: The classical density wave theory is not a realistic explanation\nfor the spiral structure of M31. Instead, external causes should be considered,\ne.g. interactions with massive gas clouds or dwarf companions of M31.\n", "title": "Spiral arms and disc stability in the Andromeda galaxy" }
null
null
null
null
true
null
9468
null
Default
null
null
null
{ "abstract": " According to a traditional point of view Boltzmann entropy is intimately\nrelated to linear Fokker-Planck equations (Smoluchowski, Klein-Kramers, and\nRayleigh equations) that describe a well-known nonequilibrium phenomenon:\n(normal) Brownian motion of a particle immersed in a thermal bath.\nNevertheless, current researches have claimed that non-Boltzmann entropies\n(Tsallis and Renyi entropies, for instance) may give rise to anomalous Brownian\nmotion through nonlinear Fokker-Planck equations. The novelty of the present\narticle is to show that anomalous diffusion could be investigated within the\nframework of non-Markovian linear Fokker-Planck equations. So on the ground of\nthis non-Markovian approach to Brownian motion, we find out anomalous diffusion\ncharacterized by the mean square displacement of a free particle and a harmonic\noscillator in absence of inertial force as well as the mean square momentum of\na free particle in presence of inertial force.\n", "title": "Anomalous Brownian motion via linear Fokker-Planck equations" }
null
null
null
null
true
null
9469
null
Default
null
null
null
{ "abstract": " A mathematical model for variable selection in functional regression models\nwith scalar response is proposed. By \"variable selection\" we mean a procedure\nto replace the whole trajectories of the functional explanatory variables with\ntheir values at a finite number of carefully selected instants (or \"impact\npoints\"). The basic idea of our approach is to use the Reproducing Kernel\nHilbert Space (RKHS) associated with the underlying process, instead of the\nmore usual L2[0,1] space, in the definition of the linear model. This turns out\nto be especially suitable for variable selection purposes, since the\nfinite-dimensional linear model based on the selected \"impact points\" can be\nseen as a particular case of the RKHS-based linear functional model. In this\nframework, we address the consistent estimation of the optimal design of impact\npoints and we check, via simulations and real data examples, the performance of\nthe proposed method.\n", "title": "An RKHS model for variable selection in functional regression" }
null
null
null
null
true
null
9470
null
Default
null
null
null
{ "abstract": " We report the development of a multichannel microscopy for whole-slide\nmultiplane, multispectral, and phase imaging. We use trinocular heads to split\nthe beam path into 6 independent channels and employ a camera array for\nparallel data acquisition, achieving a maximum data throughput of ~1 gigapixel\nper second. To perform single-frame rapid autofocusing, we place two\nnear-infrared LEDs at the back focal plane of the condenser lens to illuminate\nthe sample from two different incident angles. A hot mirror is used to direct\nthe near-infrared light to an autofocusing camera. For multiplane whole slide\nimaging (WSI), we acquire 6 different focal planes of a thick specimen\nsimultaneously. For multispectral WSI, we relay the 6 independent image planes\nto the same focal position and simultaneously acquire information at 6 spectral\nbands. For whole-slide phase imaging, we acquire images at 3 focal positions\nsimultaneously and use the transport-of-intensity equation to recover the phase\ninformation. We also provide an open-source design to further increase the\nnumber of channels from 6 to 15. The reported platform provides a simple\nsolution for multiplexed fluorescence imaging and multimodal WSI. Acquiring an\ninstant focal stack without z-scanning may also enable fast 3D dynamic tracking\nof various biological samples.\n", "title": "Dual-LED-based multichannel microscopy for whole-slide multiplane, multispectral, and phase imaging" }
null
null
null
null
true
null
9471
null
Default
null
null
null
{ "abstract": " The trinity of so-called \"canonical\" wall-bounded turbulent flows, comprising\nthe zero pressure gradient turbulent boundary layer, abbreviated ZPG TBL,\nturbulent pipe flow and channel/duct flows has continued to receive intense\nattention as new and more reliable experimental data have become available.\nNevertheless, the debate on whether the logarithmic part of the mean velocity\nprofile, in particular the Kármán constant $\\kappa$, is identical for these\nthree canonical flows or flow-dependent is still ongoing. In this paper, which\nexpands upon Monkewitz and Nagib (24th ICTAM Conf., Montreal, 2016), the\nasymptotic matching requirement of equal $\\kappa$ in the log-law and in the\nexpression for the centerline/free-stream velocity is reiterated and shown to\npreclude a single universal log-law in the three canonical flows or at least\nmake it very unlikely. The current re-analysis of high quality mean velocity\nprofiles in ZPG TBL's, the Princeton \"Superpipe\" and in channels and ducts\nleads to a coherent description of (almost) all seemingly contradictory data\ninterpretations in terms of TWO logarithmic regions in pipes and channels: A\nuniversal interior, near-wall logarithmic region with the same parameters as in\nthe ZPG TBL, in particular $\\kappa_{\\mathrm{wall}} \\cong 0.384$, but only\nextending from around $150$ to around $10^3$ wall units, and shrinking with\nincreasing pressure gradient, followed by an exterior logarithmic region with a\nflow specific $\\kappa$ matching the logarithmic slope of the respective\nfree-stream or centerline velocity. The log-law parameters of the exterior\nlogarithmic region in channels and pipes are shown to depend monotonically on\nthe pressure gradient.\n", "title": "Revisiting the quest for a universal log-law and the role of pressure gradient in \"canonical\" wall-bounded turbulent flows" }
null
null
null
null
true
null
9472
null
Default
null
null
null
{ "abstract": " In this paper, we consider the problem of learning object manipulation tasks\nfrom human demonstration using RGB or RGB-D cameras. We highlight the key\nchallenges in capturing sufficiently good data with no tracking devices -\nstarting from sensor selection and accurate 6DoF pose estimation to natural\nlanguage processing. In particular, we focus on two showcases: gluing task with\na glue gun and simple block-stacking with variable blocks. Furthermore, we\ndiscuss how a linguistic description of the task could help to improve the\naccuracy of task description. We also present the whole architecture of our\ntransfer of the imitated task to the simulated and real robot environment.\n", "title": "Teaching robots to imitate a human with no on-teacher sensors. What are the key challenges?" }
null
null
null
null
true
null
9473
null
Default
null
null
null
{ "abstract": " This paper addresses important control and observability aspects of the phase\nsynchronization of two oscillators. To this aim a feedback control framework is\nproposed based on which issues related to master-slave synchronization are\nanalyzed. Comparing results using Cartesian and cylindrical coordinates in the\ncontext of the proposed framework it is argued that: i)~observability does not\nplay a significant role in phase synchronization, although it is granted that\nit might be relevant for complete synchronization; and ii)~a practical\ndifficulty is faced when phase synchronization is aimed at but the control\naction is not a direct function of the phase error. A procedure for overcoming\nsuch a problem is proposed. The only assumption made is that the phase can be\nestimated using the arctangent function. The main aspects of the paper are\nillustrated using the Poincaré equations, van der Pol and Rössler\noscillators in dynamical regimes for which the phase is well defined.\n", "title": "Control and Observability Aspects of Phase Synchronization" }
null
null
null
null
true
null
9474
null
Default
null
null
null
{ "abstract": " We present a theoretical investigation of the dynamic density structure\nfactor of a strongly interacting Fermi gas near a Feshbach resonance at finite\ntemperature. The study is based on a gauge invariant linear response theory.\nThe theory is consistent with a diagrammatic approach for the equilibrium state\ntaking into account the pair fluctuation effects and respects some important\nrestrictions like the $f$-sum rule. Our numerical results show that the dynamic\ndensity structure factor at large incoming momentum and at half recoil\nfrequency has a qualitatively similar behavior as the order parameter, which\ncan signify the appearance of the condensate. This qualitatively agrees with\nthe recent Bragg spectroscopy experiment results. We also present the results\nat small incoming momentum.\n", "title": "Dynamic density structure factor of a unitary Fermi gas at finite temperature" }
null
null
null
null
true
null
9475
null
Default
null
null
null
{ "abstract": " Let $X$ be a compact metrizable group and $\\Gamma$ a countable group acting\non $X$ by continuous group automorphisms. We give sufficient conditions under\nwhich the dynamical system $(X,\\Gamma)$ is surjunctive, i.e., every injective\ncontinuous map $\\tau \\colon X \\to X$ commuting with the action of $\\Gamma$ is\nsurjective.\n", "title": "Surjunctivity and topological rigidity of algebraic dynamical systems" }
null
null
null
null
true
null
9476
null
Default
null
null
null
{ "abstract": " A novel diverse domain (DCT-SVD & DWT-SVD) watermarking scheme is proposed in\nthis paper. Here, the watermark is embedded simultaneously onto the two\ndomains. It is shown that an audio signal watermarked using this scheme has\nbetter subjective and objective quality when compared with other watermarking\nschemes. Also proposed are two novel watermark detection algorithms viz., AOT\n(Adaptively Optimised Threshold) and AOTx (AOT eXtended). The fundamental idea\nbehind both is finding an optimum threshold for detecting a known character\nembedded along with the actual watermarks in a known location, with the\nconstraint that the Bit Error Rate (BER) is minimum. This optimum threshold is\nused for detecting the other characters in the watermarks. This approach is\nshown to make the watermarking scheme less susceptible to various signal\nprocessing attacks, thus making the watermarks more robust.\n", "title": "High Resilience Diverse Domain Multilevel Audio Watermarking with Adaptive Threshold" }
null
null
null
null
true
null
9477
null
Default
null
null
null
{ "abstract": " Solving a large-scale regularized linear inverse problem using multiple\nprocessors is important in various real-world applications due to the\nlimitations of individual processors and constraints on data sharing policies.\nThis paper focuses on the setting where the matrix is partitioned column-wise.\nWe extend the algorithmic framework and the theoretical analysis of approximate\nmessage passing (AMP), an iterative algorithm for solving linear inverse\nproblems, whose asymptotic dynamics are characterized by state evolution (SE).\nIn particular, we show that column-wise multiprocessor AMP (C-MP-AMP) obeys an\nSE under the same assumptions when the SE for AMP holds. The SE results imply\nthat (i) the SE of C-MP-AMP converges to a state that is no worse than that of\nAMP and (ii) the asymptotic dynamics of C-MP-AMP and AMP can be identical.\nMoreover, for a setting that is not covered by SE, numerical results show that\ndamping can improve the convergence performance of C-MP-AMP.\n", "title": "Multiprocessor Approximate Message Passing with Column-Wise Partitioning" }
null
null
null
null
true
null
9478
null
Default
null
null
null
{ "abstract": " An important property of statistical estimators is qualitative robustness,\nthat is small changes in the distribution of the data only result in small\nchances of the distribution of the estimator. Moreover, in practice, the\ndistribution of the data is commonly unknown, therefore bootstrap\napproximations can be used to approximate the distribution of the estimator.\nHence qualitative robustness of the statistical estimator under the bootstrap\napproximation is a desirable property. Currently most theoretical\ninvestigations on qualitative robustness assume independent and identically\ndistributed pairs of random variables. However, in practice this assumption is\nnot fulfilled. Therefore, we examine the qualitative robustness of bootstrap\napproximations for non-i.i.d. random variables, for example $\\alpha$-mixing and\nweakly dependent processes. In the i.i.d. case qualitative robustness is\nensured via the continuity of the statistical operator, representing the\nestimator, see Hampel (1971) and Cuevas and Romo (1993). We show, that\nqualitative robustness of the bootstrap approximation is still ensured under\nthe assumption that the statistical operator is continuous and under an\nadditional assumption on the stochastic process. In particular, we require a\nconvergence condition of the empirical measure of the underlying process, the\nso called Varadarajan property.\n", "title": "Qualitative robustness for bootstrap approximations" }
null
null
null
null
true
null
9479
null
Default
null
null
null
{ "abstract": " The kernel-based regularization method has two core issues: kernel design and\nhyperparameter estimation. In this paper, we focus on the second issue and\nstudy the properties of several hyperparameter estimators including the\nempirical Bayes (EB) estimator, two Stein's unbiased risk estimators (SURE) and\ntheir corresponding Oracle counterparts, with an emphasis on the asymptotic\nproperties of these hyperparameter estimators. To this goal, we first derive\nand then rewrite the first order optimality conditions of these hyperparameter\nestimators, leading to several insights on these hyperparameter estimators.\nThen we show that as the number of data goes to infinity, the two SUREs\nconverge to the best hyperparameter minimizing the corresponding mean square\nerror, respectively, while the more widely used EB estimator converges to\nanother best hyperparameter minimizing the expectation of the EB estimation\ncriterion. This indicates that the two SUREs are asymptotically optimal but the\nEB estimator is not. Surprisingly, the convergence rate of two SUREs is slower\nthan that of the EB estimator, and moreover, unlike the two SUREs, the EB\nestimator is independent of the convergence rate of $\\Phi^T\\Phi/N$ to its\nlimit, where $\\Phi$ is the regression matrix and $N$ is the number of data. A\nMonte Carlo simulation is provided to demonstrate the theoretical results.\n", "title": "On Asymptotic Properties of Hyperparameter Estimators for Kernel-based Regularization Methods" }
null
null
[ "Computer Science" ]
null
true
null
9480
null
Validated
null
null
null
{ "abstract": " Despite that accelerating convolutional neural network (CNN) receives an\nincreasing research focus, the save on resource consumption always comes with a\ndecrease in accuracy. To both increase accuracy and decrease resource\nconsumption, we explore an environment information, called class skew, which is\neasily available and exists widely in daily life. Since the class skew may\nswitch as time goes, we bring up probability layer to utilize class skew\nwithout any overhead during the runtime. Further, we observe class skew\ndichotomy that some class skew may appear frequently in the future, called hot\nclass skew, and others will never appear again or appear seldom, called cold\nclass skew. Inspired by techniques from source code optimization, two modes,\ni.e., interpretation and compilation, are proposed. The interpretation mode\npursues efficient adaption during runtime for cold class skew and the\ncompilation mode aggressively optimize on hot ones for more efficient\ndeployment in the future. Aggressive optimization is processed by\nclass-specific pruning and provides extra benefit. Finally, we design a\nsystematic framework, SECS, to dynamically detect class skew, processing\ninterpretation and compilation, as well as select the most accurate\narchitectures under the runtime resource budget. Extensive evaluations show\nthat SECS can realize end-to-end classification speedups by a factor of 3x to\n11x relative to state-of-the-art convolutional neural networks, at a higher\naccuracy.\n", "title": "SECS: Efficient Deep Stream Processing via Class Skew Dichotomy" }
null
null
null
null
true
null
9481
null
Default
null
null
null
{ "abstract": " We present a numerical implementation of the infinite-range exterior complex\nscaling (irECS) [Phys. Rev. A 81, 053845 (2010)] as an efficient absorbing\nboundary to the time-dependent complete-active-space self-consistent field\n(TD-CASSCF) method [Phys. Rev. A 94, 023405 (2016)] for multielectron atoms\nsubject to an intense laser pulse. We introduce Gauss-Laguerre-Radau quadrature\npoints to construct discrete variable representation basis functions in the\nlast radial finite element extending to infinity. This implementation is\napplied to strong-field ionization and high-harmonic generation in He, Be, and\nNe atoms. It efficiently prevents unphysical reflection of photoelectron wave\npackets at the simulation boundary, enabling accurate simulations with\nsubstantially reduced computational cost, even under significant (~ 50%) double\nionization. For the case of a simulation of high-harmonic generation from Ne,\nfor example, 80% cost reduction is achieved, compared to a mask-function\nabsorption boundary.\n", "title": "Implementation of infinite-range exterior complex scaling to the time-dependent complete-active-space self-consistent-field method" }
null
null
null
null
true
null
9482
null
Default
null
null
null
{ "abstract": " We use a large sample of $\\sim 350,000$ galaxies constructed by combining the\nUKIDSS UDS, VIDEO/CFHT-LS, UltraVISTA/COSMOS and GAMA survey regions to probe\nthe major merging histories of massive galaxies ($>10^{10}\\ \\mathrm{M}_\\odot$)\nat $0.005 < z < 3.5$. We use a method adapted from that presented in\nLopez-Sanjuan et al. (2014) using the full photometric redshift probability\ndistributions, to measure pair $\\textit{fractions}$ of flux-limited, stellar\nmass selected galaxy samples using close-pair statistics. The pair fraction is\nfound to weakly evolve as $\\propto (1+z)^{0.8}$ with no dependence on stellar\nmass. We subsequently derive major merger $\\textit{rates}$ for galaxies at $>\n10^{10}\\ \\mathrm{M}_\\odot$ and at a constant number density of $n > 10^{-4}$\nMpc$^{-3}$, and find rates a factor of 2-3 smaller than previous works,\nalthough this depends strongly on the assumed merger timescale and likelihood\nof a close-pair merging. Galaxies undergo approximately 0.5 major mergers at $z\n< 3.5$, accruing an additional 1-4 $\\times 10^{10}\\ \\mathrm{M}_\\odot$ in the\nprocess. Major merger accretion rate densities of $\\sim 2 \\times 10^{-4}$\n$\\mathrm{M}_\\odot$ yr$^{-1}$ Mpc$^{-3}$ are found for number density selected\nsamples, indicating that direct progenitors of local massive\n($>10^{11}\\mathrm{M}_\\odot$) galaxies have experienced a steady supply of\nstellar mass via major mergers throughout their evolution. While pair fractions\nare found to agree with those predicted by the Henriques et al. (2014)\nsemi-analytic model, the Illustris hydrodynamical simulation fails to\nquantitatively reproduce derived merger rates. Furthermore, we find major\nmergers become a comparable source of stellar mass growth compared to\nstar-formation at $z < 1$, but is 10-100 times smaller than the SFR density at\nhigher redshifts.\n", "title": "A consistent measure of the merger histories of massive galaxies using close-pair statistics I: Major mergers at $z < 3.5$" }
null
null
[ "Physics" ]
null
true
null
9483
null
Validated
null
null
null
{ "abstract": " We present a generic framework for trading off fidelity and cost in computing\nstochastic gradients when the costs of acquiring stochastic gradients of\ndifferent quality are not known a priori. We consider a mini-batch oracle that\ndistributes a limited query budget over a number of stochastic gradients and\naggregates them to estimate the true gradient. Since the optimal mini-batch\nsize depends on the unknown cost-fidelity function, we propose an algorithm,\n{\\it EE-Grad}, that sequentially explores the performance of mini-batch oracles\nand exploits the accumulated knowledge to estimate the one achieving the best\nperformance in terms of cost-efficiency. We provide performance guarantees for\nEE-Grad with respect to the optimal mini-batch oracle, and illustrate these\nresults in the case of strongly convex objectives. We also provide a simple\nnumerical example that corroborates our theoretical findings.\n", "title": "EE-Grad: Exploration and Exploitation for Cost-Efficient Mini-Batch SGD" }
null
null
null
null
true
null
9484
null
Default
null
null
null
{ "abstract": " Phase compensated optical fiber links enable high accuracy atomic clocks\nseparated by thousands of kilometers to be compared with unprecedented\nstatistical resolution. By searching for a daily variation of the frequency\ndifference between four strontium optical lattice clocks in different locations\nthroughout Europe connected by such links, we improve upon previous tests of\ntime dilation predicted by special relativity. We obtain a constraint on the\nRobertson--Mansouri--Sexl parameter $|\\alpha|\\lesssim 1.1 \\times10^{-8}$\nquantifying a violation of time dilation, thus improving by a factor of around\ntwo the best known constraint obtained with Ives--Stilwell type experiments,\nand by two orders of magnitude the best constraint obtained by comparing atomic\nclocks. This work is the first of a new generation of tests of fundamental\nphysics using optical clocks and fiber links. As clocks improve, and as fiber\nlinks are routinely operated, we expect that the tests initiated in this paper\nwill improve by orders of magnitude in the near future.\n", "title": "Test of special relativity using a fiber network of optical clocks" }
null
null
null
null
true
null
9485
null
Default
null
null
null
{ "abstract": " A spin-1 atomic gas in an optical lattice, in the unit-filling Mott Insulator\n(MI) phase and in the presence of disordered spin-dependent interaction, is\nconsidered. In this regime, at zero temperature, the system is well described\nby a disordered rotationally-invariant spin-1 bilinear-biquadratic model. We\nstudy, via the density matrix renormalization group algorithm, a bounded\ndisorder model such that the spin interactions can be locally either\nferromagnetic or antiferromagnetic. Random interactions induce the appearance\nof a disordered ferromagnetic phase characterized by a non-vanishing value of\nspin-glass order parameter across the boundary between a ferromagnetic phase\nand a dimer phase exhibiting random singlet order. The study of the\ndistribution of the block entanglement entropy reveals that in this region\nthere is no random singlet order.\n", "title": "Magnetic phases of spin-1 lattice gases with random interactions" }
null
null
null
null
true
null
9486
null
Default
null
null
null
{ "abstract": " In this paper, we prove the existence of classical solutions to second\nboundary value prob- lems for generated prescribed Jacobian equations, as\nrecently developed by the second author, thereby obtaining extensions of\nclassical solvability of optimal transportation problems to problems arising in\nnear field geometric optics. Our results depend in particular on a priori\nsecond derivative estimates recently established by the authors under weak\nco-dimension one convexity hypotheses on the associated matrix functions with\nrespect to the gradient variables, (A3w). We also avoid domain deformations by\nusing the convexity theory of generating functions to construct unique initial\nsolutions for our homotopy family, thereby enabling application of the degree\ntheory for nonlinear oblique boundary value problems.\n", "title": "On the second boundary value problem for Monge-Ampere type equations and geometric optics" }
null
null
null
null
true
null
9487
null
Default
null
null
null
{ "abstract": " This paper is about well-posedness and realizability of the kinetic equation\nfor gas-particle flows and its relationship to the Generalized Langevin Model\n(GLM) PDF equation. Previous analyses claim that this kinetic equation is\nill-posed, that in particular it has the properties of a backward heat equation\nand as a consequence, its solutions will in the course of time exhibit\nfinite-time singularities. We show that the analysis leading to this conclusion\nis fundamentally incorrect because it ignores the coupling between the phase\nspace variables in the kinetic equation and the time and particle inertia\ndependence of the phase space diffusion tensor. This contributes an extra $+ve$\ndiffusion that always outweighs the contribution from the$-ve$ diffusion\nassociated with the dispersion along one of the principal axes of the phase\nspace diffusion tensor. This is confirmed by a numerical evaluation of analytic\nsolutions of these $+ve$ and $-ve$ contributions to the particle diffusion\ncoefficient along this principal axis. We also examine other erroneous claims\nand assumptions made in previous studies that demonstrate the apparent\nsuperiority of the GLM PDF approach over the kinetic approach. In so doing we\nhave drawn attention to the limitations of the GLM approach which these studies\nhave ignored or not properly considered, to give a more balanced appraisal of\nthe benefits of both PDF approaches.\n", "title": "Is the kinetic equation for turbulent gas-particle flows ill-posed?" }
null
null
null
null
true
null
9488
null
Default
null
null
null
{ "abstract": " In this note we construct a series of small subsets containing a non-d-th\npower element in a finite field by applying certain bounds on incomplete\ncharacter sums.\nPrecisely, let $h=\\lfloor q^{\\delta}\\rfloor>1$ and $d\\mid q^h-1$. Let $r$ be\na prime divisor of $q-1$ such that the largest prime power part of $q-1$ has\nthe form $r^s$. Then there is a constant $0<\\epsilon<1$ such that for a ratio\nat least $ {q^{-\\epsilon h}}$ of $\\alpha\\in \\mathbb{F}_{q^{h}}\n\\backslash\\mathbb{F}_{q}$, the set $S=\\{ \\alpha-x^t, x\\in\\mathbb{F}_{q}\\}$ of\ncardinality $1+\\frac {q-1} {M(h)}$ contains a non-d-th power in\n$\\mathbb{F}_{q^{\\lfloor q^\\delta\\rfloor}}$, where $t$ is the largest power of\n$r$ such that $t<\\sqrt{q}/h$ and $M(h)$ is defined as $$M(h)=\\max_{r \\mid\n(q-1)} r^{\\min\\{v_r(q-1), \\lfloor\\log_r{q}/2-\\log_r h\\rfloor\\}}.$$ Here $r$\nruns thourgh prime divisors and $v_r(x)$ is the $r$-adic oder of $x$. For odd\n$q$, the choice of $\\delta=\\frac 12-d, d=o(1)>0$ shows that there exists an\nexplicit subset of cardinality $q^{1-d}=O(\\log^{2+\\epsilon'}(q^h))$ containing\na non-quadratic element in the field $\\mathbb{F}_{q^h}$. On the other hand, the\nchoice of $h=2$ shows that for any odd prime power $q$, there is an explicit\nsubset of cardinality $1+\\frac {q-1}{M(2)}$ containing a non-quadratic element\nin $\\mathbb{F}_{q^2}$. This improves a $q-1$ construction by Coulter and Kosick\n\\cite{CK} since $\\lfloor \\log_2{(q-1)}\\rfloor\\leq M(2) < \\sqrt{q}$.\nIn addition, we obtain a similar construction for small sets containing a\nprimitive element. The construction works well provided $\\phi(q^h-1)$ is very\nsmall, where $\\phi$ is the Euler's totient function.\n", "title": "On the construction of small subsets containing special elements in a finite field" }
null
null
null
null
true
null
9489
null
Default
null
null
null
{ "abstract": " In this paper, we suggest a framework to make use of mutual information as a\nregularization criterion to train Auto-Encoders (AEs). In the proposed\nframework, AEs are regularized by minimization of the mutual information\nbetween input and encoding variables of AEs during the training phase. In order\nto estimate the entropy of the encoding variables and the mutual information,\nwe propose a non-parametric method. We also give an information theoretic view\nof Variational AEs (VAEs), which suggests that VAEs can be considered as\nparametric methods that estimate entropy. Experimental results show that the\nproposed non-parametric models have more degree of freedom in terms of\nrepresentation learning of features drawn from complex distributions such as\nMixture of Gaussians, compared to methods which estimate entropy using\nparametric approaches, such as Variational AEs.\n", "title": "Information Potential Auto-Encoders" }
null
null
null
null
true
null
9490
null
Default
null
null
null
{ "abstract": " Let $q$ be an odd prime power and $D$ be the set of monic irreducible\npolynomials in $\\mathbb F_q[x]$ which can be written as a composition of monic\ndegree two polynomials. In this paper we prove that $D$ has a natural regular\nstructure by showing that there exists a finite automaton having $D$ as\naccepted language. Our method is constructive.\n", "title": "Irreducible compositions of degree two polynomials over finite fields have regular structure" }
null
null
null
null
true
null
9491
null
Default
null
null
null
{ "abstract": " We consider the situation when the signal propagating through each arm of an\ninterferometer has a complicated multi-mode structure. We find the relation\nbetween the particle-entanglement and the possibility to surpass the shot-noise\nlimit of the phase estimation. Our results are general---they apply to pure and\nmixed states of identical and distinguishable particles (or combinations of\nboth), for a fixed and fluctuating number of particles. We also show that the\nmethod for detecting the entanglement often used in two-mode system can give\nmisleading results when applied to the multi-mode case.\n", "title": "Quantum interferometry in multi-mode systems" }
null
null
null
null
true
null
9492
null
Default
null
null
null
{ "abstract": " We start the study of glider representations in the setting of semisimple Lie\nalgebras. A glider representation is defined for some positively filtered ring\n$FR$ and here we consider the right bounded algebra filtration\n$FU(\\mathfrak{g})$ on the universal enveloping algebra $U(\\mathfrak{g})$ of\nsome semisimple Lie algebra $\\mathfrak{g}$ given by a fixed chain of semisimple\nsub Lie algebras $\\mathfrak{g}_1 \\subset \\mathfrak{g}_2 \\subset \\ldots \\subset\n\\mathfrak{g}_n = \\mathfrak{g}$. Inspired by the classical representation\ntheory, we introduce so-called Verma glider representations. Their existence is\nrelated to the relations between the root systems of the appearing Lie algebras\n$\\mathfrak{g}_i$. In particular, we consider chains of simple Lie algebras of\nthe same type $A,B,C$ and $D$.\n", "title": "Glider representations of chains of semisimple Lie algebras" }
null
null
null
null
true
null
9493
null
Default
null
null
null
{ "abstract": " An \\emph{ab initio} Langevin dynamics approach is developed based on\nstochastic density functional theory (sDFT) within a new \\emph{embedded\nsaturated } \\emph{fragment }formalism, applicable to covalently bonded systems.\nThe forces on the nuclei generated by sDFT contain a random component natural\nto Langevin dynamics and its standard deviation is used to estimate the\nfriction term on each atom by satisfying the fluctuation\\textendash dissipation\nrelation. The overall approach scales linearly with system size even if the\ndensity matrix is not local and is thus applicable to ordered as well as\ndisordered extended systems. We implement the approach for a series of silicon\nnanocrystals (NCs) of varying size with a diameter of up to $3$nm corresponding\nto $N_{e}=3000$ electrons and generate a set of configurations that are\ndistributed canonically at a fixed temperature, ranging from cryogenic to room\ntemperature. We also analyze the structure properties of the NCs and discuss\nthe reconstruction of the surface geometry.\n", "title": "Equilibrium configurations of large nanostructures using the embedded saturated-fragments stochastic density functional theory" }
null
null
null
null
true
null
9494
null
Default
null
null
null
{ "abstract": " Biological plastic neural networks are systems of extraordinary computational\ncapabilities shaped by evolution, development, and lifetime learning. The\ninterplay of these elements leads to the emergence of adaptive behavior and\nintelligence. Inspired by such intricate natural phenomena, Evolved Plastic\nArtificial Neural Networks (EPANNs) use simulated evolution in-silico to breed\nplastic neural networks with a large variety of dynamics, architectures, and\nplasticity rules: these artificial systems are composed of inputs, outputs, and\nplastic components that change in response to experiences in an environment.\nThese systems may autonomously discover novel adaptive algorithms, and lead to\nhypotheses on the emergence of biological adaptation. EPANNs have seen\nconsiderable progress over the last two decades. Current scientific and\ntechnological advances in artificial neural networks are now setting the\nconditions for radically new approaches and results. In particular, the\nlimitations of hand-designed networks could be overcome by more flexible and\ninnovative solutions. This paper brings together a variety of inspiring ideas\nthat define the field of EPANNs. The main methods and results are reviewed.\nFinally, new opportunities and developments are presented.\n", "title": "Born to Learn: the Inspiration, Progress, and Future of Evolved Plastic Artificial Neural Networks" }
null
null
null
null
true
null
9495
null
Default
null
null
null
{ "abstract": " Recently, graph neural networks have attracted great attention and achieved\nprominent performance in various research fields. Most of those algorithms have\nassumed pairwise relationships of objects of interest. However, in many real\napplications, the relationships between objects are in higher-order, beyond a\npairwise formulation. To efficiently learn deep embeddings on the high-order\ngraph-structured data, we introduce two end-to-end trainable operators to the\nfamily of graph neural networks, i.e., hypergraph convolution and hypergraph\nattention. Whilst hypergraph convolution defines the basic formulation of\nperforming convolution on a hypergraph, hypergraph attention further enhances\nthe capacity of representation learning by leveraging an attention module. With\nthe two operators, a graph neural network is readily extended to a more\nflexible model and applied to diverse applications where non-pairwise\nrelationships are observed. Extensive experimental results with semi-supervised\nnode classification demonstrate the effectiveness of hypergraph convolution and\nhypergraph attention.\n", "title": "Hypergraph Convolution and Hypergraph Attention" }
null
null
null
null
true
null
9496
null
Default
null
null
null
{ "abstract": " In a former paper the concept of Bipartite PageRank was introduced and a\ntheorem on the limit of authority flowing between nodes for personalized\nPageRank has been generalized. In this paper we want to extend those results to\nmultimodal networks. In particular we introduce a hypergraph type that may be\nused for describing multimodal network where a hyperlink connects nodes from\neach of the modalities. We introduce a generalisation of PageRank for such\ngraphs and define the respective random walk model that can be used for\ncomputations. we finally state and prove theorems on the limit of outflow of\nauthority for cases where individual modalities have identical and distinct\ndamping factors.\n", "title": "Network Capacity Bound for Personalized PageRank in Multimodal Networks" }
null
null
null
null
true
null
9497
null
Default
null
null
null
{ "abstract": " A method for the introduction of second-order derivatives of the log\nlikelihood into HMC algorithms is introduced, which does not require the\nHessian to be evaluated at each leapfrog step but only at the start and end of\ntrajectories.\n", "title": "Hessian corrections to Hybrid Monte Carlo" }
null
null
null
null
true
null
9498
null
Default
null
null
null
{ "abstract": " A sample of Coma cluster ultra-diffuse galaxies (UDGs) are modelled in the\ncontext of Extended Modified Newtonian Dynamics (EMOND) with the aim to explain\nthe large dark matter-like effect observed in these cluster galaxies.\nWe first build a model of the Coma cluster in the context of EMOND using gas\nand galaxy mass profiles from the literature. Then assuming the dynamical mass\nof the UDGs satisfies the fundamental manifold of other ellipticals, and that\nthe UDG stellar mass-to-light matches their colour, we can verify the EMOND\nformulation by comparing two predictions of the baryonic mass of UDGs.\nWe find that EMOND can explain the UDG mass, within the expected modelling\nerrors, if they lie on the fundamental manifold of ellipsoids, however, given\nthat measurements show one UDG lying off the fundamental manifold, observations\nof more UDGs are needed to confirm this assumption.\n", "title": "Are Over-massive Haloes of Ultra Diffuse Galaxies Consistent with Extended MOND?" }
null
null
null
null
true
null
9499
null
Default
null
null
null
{ "abstract": " Reliable and real-time 3D reconstruction and localization functionality is a\ncrucial prerequisite for the navigation of actively controlled capsule\nendoscopic robots as an emerging, minimally invasive diagnostic and therapeutic\ntechnology for use in the gastrointestinal (GI) tract. In this study, we\npropose a fully dense, non-rigidly deformable, strictly real-time,\nintraoperative map fusion approach for actively controlled endoscopic capsule\nrobot applications which combines magnetic and vision-based localization, with\nnon-rigid deformations based frame-to-model map fusion. The performance of the\nproposed method is demonstrated using four different ex-vivo porcine stomach\nmodels. Across different trajectories of varying speed and complexity, and four\ndifferent endoscopic cameras, the root mean square surface reconstruction\nerrors 1.58 to 2.17 cm.\n", "title": "Magnetic-Visual Sensor Fusion-based Dense 3D Reconstruction and Localization for Endoscopic Capsule Robots" }
null
null
[ "Computer Science" ]
null
true
null
9500
null
Validated
null
null